as call:
# Designate an appropriate return value for the call.
call.return_value = resources.JobTemplate()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
resources.JobTemplate()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_job_template(
parent="parent_value",
job_template=resources.JobTemplate(name="name_value"),
job_template_id="job_template_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].job_template == resources.JobTemplate(name="name_value")
assert args[0].job_template_id == "job_template_id_value"
@pytest.mark.asyncio
async def test_create_job_template_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_job_template(
services.CreateJobTemplateRequest(),
parent="parent_value",
job_template=resources.JobTemplate(name="name_value"),
job_template_id="job_template_id_value",
)
def test_list_job_templates(
transport: str = "grpc", request_type=services.ListJobTemplatesRequest
):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = services.ListJobTemplatesResponse(
next_page_token="<PASSWORD>",
)
response = client.list_job_templates(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == services.ListJobTemplatesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListJobTemplatesPager)
assert response.next_page_token == "next_page_token_value"
def test_list_job_templates_from_dict():
test_list_job_templates(request_type=dict)
def test_list_job_templates_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
client.list_job_templates()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == services.ListJobTemplatesRequest()
@pytest.mark.asyncio
async def test_list_job_templates_async(
transport: str = "grpc_asyncio", request_type=services.ListJobTemplatesRequest
):
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
services.ListJobTemplatesResponse(next_page_token="next_page_token_value",)
)
response = await client.list_job_templates(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == services.ListJobTemplatesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListJobTemplatesAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_job_templates_async_from_dict():
await test_list_job_templates_async(request_type=dict)
def test_list_job_templates_field_headers():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.ListJobTemplatesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
call.return_value = services.ListJobTemplatesResponse()
client.list_job_templates(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_job_templates_field_headers_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = services.ListJobTemplatesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
services.ListJobTemplatesResponse()
)
await client.list_job_templates(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_job_templates_flattened():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = services.ListJobTemplatesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_job_templates(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_job_templates_flattened_error():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_job_templates(
services.ListJobTemplatesRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_job_templates_flattened_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = services.ListJobTemplatesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
services.ListJobTemplatesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_job_templates(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_job_templates_flattened_error_async():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_job_templates(
services.ListJobTemplatesRequest(), parent="parent_value",
)
def test_list_job_templates_pager():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobTemplatesResponse(
job_templates=[
resources.JobTemplate(),
resources.JobTemplate(),
resources.JobTemplate(),
],
next_page_token="abc",
),
services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(),], next_page_token="ghi",
),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_job_templates(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, resources.JobTemplate) for i in results)
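# Illustrative sketch, not part of the generated tests: outside of these mocked
# tests, the pager returned by list_job_templates() is consumed by iterating
# over it directly. The parent path below is a placeholder value.
def _example_iterate_job_templates(client: TranscoderServiceClient):
    for job_template in client.list_job_templates(
        parent="projects/my-project/locations/us-central1"
    ):
        print(job_template.name)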
def test_list_job_templates_pages():
client = TranscoderServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobTemplatesResponse(
job_templates=[
resources.JobTemplate(),
resources.JobTemplate(),
resources.JobTemplate(),
],
next_page_token="abc",
),
services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(),], next_page_token="ghi",
),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
),
RuntimeError,
)
pages = list(client.list_job_templates(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_job_templates_async_pager():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobTemplatesResponse(
job_templates=[
resources.JobTemplate(),
resources.JobTemplate(),
resources.JobTemplate(),
],
next_page_token="abc",
),
services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(),], next_page_token="ghi",
),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
),
RuntimeError,
)
async_pager = await client.list_job_templates(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, resources.JobTemplate) for i in responses)
@pytest.mark.asyncio
async def test_list_job_templates_async_pages():
client = TranscoderServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_job_templates),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
services.ListJobTemplatesResponse(
job_templates=[
resources.JobTemplate(),
resources.JobTemplate(),
resources.JobTemplate(),
],
next_page_token="abc",
),
services.ListJobTemplatesResponse(job_templates=[], next_page_token="def",),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(),], next_page_token="ghi",
),
services.ListJobTemplatesResponse(
job_templates=[resources.JobTemplate(), resources.JobTemplate(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_job_templates(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_get_job_template(
transport: str = "grpc", request_type=services.GetJobTemplateRequest
):
client = TranscoderServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_job_template), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.JobTemplate(name="name_value",)
response = client.get_job_template(request)
# Establish that the underlying gRPC stub method was called.
main.threadID,
name = "getDeviceLinksActiveCount",
args = [dpid])
t.start()
pool.append(t)
i = i + 1
main.threadID = main.threadID + 1
for thread in pool:
thread.join()
linkCountResult = thread.result
#linkCountTemp = re.split( r'\t+', linkCountResult )
#linkCount = linkCountTemp[ 1 ].replace( "\r\r\n\x1b[32m", "" )
deviceActiveLinksCountTemp.append( linkCountResult )
time2 = time.time()
main.log.info("Time for counting all enabled links of the switches: %2f seconds" %(time2-time1))
main.log.info (
"Device Active links EXPECTED: %s" %
str( main.deviceActiveLinksCount ) )
main.log.info (
"Device Active links ACTUAL: %s" % str( deviceActiveLinksCountTemp ) )
if ( cmp( main.deviceActiveLinksCount, deviceActiveLinksCountTemp ) == 0 ):
stepResult2 = main.TRUE
else:
stepResult2 = main.FALSE
"""
Placeholder for comparing devices, hosts, paths and intents if required.
Links and ports data would be incorrect without devices anyway.
"""
caseResult = ( stepResult1 and stepResult2 )
if caseResult:
break
else:
time.sleep( main.topoCheckDelay )
main.log.warn( "Topology check failed. Trying again..." )
utilities.assert_equals( expect=main.TRUE, actual=caseResult,
onpass="Compare Topology test PASS",
onfail="Compare Topology test FAIL" )
def CASE60( self ):
"""
Install 300 host intents and verify ping all (Att Topology)
"""
main.log.report( "Add 300 host intents and verify pingall (Att Topology)" )
main.log.report( "_______________________________________" )
import itertools
import time
main.case( "Install 300 host intents" )
main.step( "Add host Intents" )
intentResult = main.TRUE
hostCombos = list( itertools.combinations( main.hostMACs, 2 ) )
intentIdList = []
time1 = time.time()
for i in xrange( 0, len( hostCombos ), int(main.numCtrls) ):
pool = []
for cli in main.CLIs:
if i >= len( hostCombos ):
break
t = main.Thread( target=cli.addHostIntent,
threadID=main.threadID,
name="addHostIntent",
args=[hostCombos[i][0],hostCombos[i][1]])
pool.append(t)
t.start()
i = i + 1
main.threadID = main.threadID + 1
for thread in pool:
thread.join()
intentIdList.append(thread.result)
time2 = time.time()
main.log.info("Time for adding host intents: %2f seconds" %(time2-time1))
# Saving intent ids to check intents in later cases
main.intentIds = list(intentIdList)
main.step("Verify intents are installed")
# Giving onos multiple chances to install intents
for i in range( main.intentCheck ):
if i != 0:
main.log.warn( "Verification failed. Retrying..." )
main.log.info("Waiting for onos to install intents...")
time.sleep( main.checkIntentsDelay )
intentState = main.TRUE
for e in range(int(main.numCtrls)):
main.log.info( "Checking intents on CLI %s" % (e+1) )
intentState = main.CLIs[e].checkIntentState( intentsId = intentIdList ) and\
intentState
if not intentState:
main.log.warn( "Not all intents installed" )
if intentState:
break
else:
#Dumping intent summary
main.log.info( "**** Intent Summary ****\n" + str(main.ONOScli1.intents( jsonFormat=False, summary=True)) )
utilities.assert_equals( expect=main.TRUE, actual=intentState,
onpass="INTENTS INSTALLED",
onfail="SOME INTENTS NOT INSTALLED" )
main.step( "Verify Ping across all hosts" )
for i in range(main.numPings):
time1 = time.time()
pingResult = main.Mininet1.pingall(timeout=main.pingTimeout)
if not pingResult:
main.log.warn("First pingall failed. Retrying...")
time.sleep(3)
else: break
time2 = time.time()
timeDiff = round( ( time2 - time1 ), 2 )
main.log.report(
"Time taken for Ping All: " +
str( timeDiff ) +
" seconds" )
utilities.assert_equals( expect=main.TRUE, actual=pingResult,
onpass="PING ALL PASS",
onfail="PING ALL FAIL" )
caseResult = ( intentState and pingResult )
utilities.assert_equals(
expect=main.TRUE,
actual=caseResult,
onpass="Install 300 Host Intents and Ping All test PASS",
onfail="Install 300 Host Intents and Ping All test FAIL" )
if not intentState:
main.log.debug( "Intents failed to install completely" )
if not pingResult:
main.log.debug( "Pingall failed" )
if not caseResult and main.failSwitch:
main.log.report("Stopping test")
main.stop( email=main.emailOnStop )
def CASE61( self ):
"""
Install 300 host intents and verify ping all for Chordal Topology
"""
main.log.report( "Add 300 host intents and verify pingall (Chordal Topo)" )
main.log.report( "_______________________________________" )
import itertools
main.case( "Install 300 host intents" )
main.step( "Add host Intents" )
intentResult = main.TRUE
hostCombos = list( itertools.combinations( main.hostMACs, 2 ) )
intentIdList = []
time1 = time.time()
for i in xrange( 0, len( hostCombos ), int(main.numCtrls) ):
pool = []
for cli in main.CLIs:
if i >= len( hostCombos ):
break
t = main.Thread( target=cli.addHostIntent,
threadID=main.threadID,
name="addHostIntent",
args=[hostCombos[i][0],hostCombos[i][1]])
pool.append(t)
t.start()
i = i + 1
main.threadID = main.threadID + 1
for thread in pool:
thread.join()
intentIdList.append(thread.result)
time2 = time.time()
main.log.info("Time for adding host intents: %2f seconds" %(time2-time1))
# Saving intent ids to check intents in later cases
main.intentIds = list(intentIdList)
main.step("Verify intents are installed")
# Giving onos multiple chances to install intents
for i in range( main.intentCheck ):
if i != 0:
main.log.warn( "Verification failed. Retrying..." )
main.log.info("Waiting for onos to install intents...")
time.sleep( main.checkIntentsDelay )
intentState = main.TRUE
for e in range(int(main.numCtrls)):
main.log.info( "Checking intents on CLI %s" % (e+1) )
intentState = main.CLIs[e].checkIntentState( intentsId = intentIdList ) and\
intentState
if not intentState:
main.log.warn( "Not all intents installed" )
if intentState:
break
else:
#Dumping intent summary
main.log.info( "**** Intents Summary ****\n" + str(main.ONOScli1.intents(jsonFormat=False, summary=True)) )
utilities.assert_equals( expect=main.TRUE, actual=intentState,
onpass="INTENTS INSTALLED",
onfail="SOME INTENTS NOT INSTALLED" )
main.step( "Verify Ping across all hosts" )
for i in range(main.numPings):
time1 = time.time()
pingResult = main.Mininet1.pingall(timeout=main.pingTimeout)
if not pingResult:
main.log.warn("First pingall failed. Retrying...")
time.sleep(main.pingSleep)
else: break
time2 = time.time()
timeDiff = round( ( time2 - time1 ), 2 )
main.log.report(
"Time taken for Ping All: " +
str( timeDiff ) +
" seconds" )
utilities.assert_equals( expect=main.TRUE, actual=pingResult,
onpass="PING ALL PASS",
onfail="PING ALL FAIL" )
caseResult = ( intentState and pingResult )
utilities.assert_equals(
expect=main.TRUE,
actual=caseResult,
onpass="Install 300 Host Intents and Ping All test PASS",
onfail="Install 300 Host Intents and Ping All test FAIL" )
if not intentState:
main.log.debug( "Intents failed to install completely" )
if not pingResult:
main.log.debug( "Pingall failed" )
if not caseResult and main.failSwitch:
main.log.report("Stopping test")
main.stop( email=main.emailOnStop )
def CASE62( self ):
"""
Install 2278 host intents and verify ping all for Spine Topology
"""
main.log.report( "Add 2278 host intents and verify pingall (Spine Topo)" )
main.log.report( "_______________________________________" )
import itertools
main.case( "Install 2278 host intents" )
main.step( "Add host Intents" )
intentResult = main.TRUE
hostCombos = list( itertools.combinations( main.hostMACs, 2 ) )
main.pingTimeout = 300
intentIdList = []
time1 = time.time()
for i in xrange( 0, len( hostCombos ), int(main.numCtrls) ):
pool = []
for cli in main.CLIs:
if i >= len( hostCombos ):
break
t = main.Thread( target=cli.addHostIntent,
threadID=main.threadID,
name="addHostIntent",
args=[hostCombos[i][0],hostCombos[i][1]])
pool.append(t)
t.start()
i = i + 1
main.threadID = main.threadID + 1
for thread in pool:
thread.join()
intentIdList.append(thread.result)
time2 = time.time()
main.log.info("Time for adding host intents: %2f seconds" %(time2-time1))
# Saving intent ids to check intents in later cases
main.intentIds = list(intentIdList)
main.step("Verify intents are installed")
# Giving onos multiple chances to install intents
for i in range( main.intentCheck ):
if i != 0:
main.log.warn( "Verification failed. Retrying..." )
main.log.info("Waiting for onos to install intents...")
time.sleep( main.checkIntentsDelay )
intentState = main.TRUE
for e in range(int(main.numCtrls)):
main.log.info( "Checking intents on CLI %s" % (e+1) )
intentState = main.CLIs[e].checkIntentState( intentsId = intentIdList ) and\
intentState
if not intentState:
main.log.warn( "Not all intents installed" )
if intentState:
break
else:
#Dumping intent summary
main.log.info( "**** Intents Summary ****\n" + str(main.ONOScli1.intents(jsonFormat=False, summary=True)) )
utilities.assert_equals( expect=main.TRUE, actual=intentState,
onpass="INTENTS INSTALLED",
onfail="SOME INTENTS NOT INSTALLED" )
main.step( "Verify Ping across all hosts" )
for i in range(main.numPings):
time1 = time.time()
pingResult = main.Mininet1.pingall(timeout=main.pingTimeout)
if not pingResult:
main.log.warn("First pingall failed. Retrying...")
time.sleep(main.pingSleep)
else: break
time2 = time.time()
timeDiff = round( ( time2 - time1 ), 2 )
main.log.report(
"Time taken for Ping All: " +
str( timeDiff ) +
" seconds" )
utilities.assert_equals( expect=main.TRUE, actual=pingResult,
onpass="PING ALL PASS",
onfail="PING ALL FAIL" )
caseResult = ( intentState and pingResult )
utilities.assert_equals(
expect=main.TRUE,
actual=caseResult,
onpass="Install 2278 Host Intents and Ping All test PASS",
onfail="Install 2278 Host Intents and Ping All test FAIL" )
if not intentState:
main.log.debug( "Intents failed to install completely" )
if not pingResult:
main.log.debug( "Pingall failed" )
if not caseResult and main.failSwitch:
main.log.report("Stopping test")
main.stop( email=main.emailOnStop )
def CASE160( self ):
"""
Verify IPv6 ping across 300 host intents (Att Topology)
"""
main.log.report( "Verify IPv6 ping across 300 host intents (Att Topology)" )
main.log.report( "_________________________________________________" )
import itertools
import time
main.case( "IPv6 ping all 300 host intents" )
main.step( "Verify IPv6 Ping across all hosts" )
pingResult = main.FALSE
time1 = time.time()
pingResult = main.Mininet1.pingall( protocol="IPv6", timeout=main.pingTimeout )
if not pingResult:
main.log.warn("First pingall | |
"""
Utility functions for Jupyter notebook to:
- format data
- transform pandas data structures
- compute common stats
These functions are used for both interactive data exploration and to implement
more complex pipelines. The output is reported through logging.
"""
import datetime
import logging
import math
from typing import (
Any,
Callable,
Collection,
Dict,
List,
Optional,
Tuple,
Union,
cast,
)
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
import statsmodels
import statsmodels.api
import tqdm.autonotebook as tauton
import core.plotting as cplott
import helpers.dbg as dbg
import helpers.list as hlist
import helpers.printing as hprint
_LOG = logging.getLogger(__name__)
# #############################################################################
# Helpers.
# #############################################################################
# TODO(gp): Move this to helpers/pandas_helpers.py
def cast_to_df(obj: Union[pd.Series, pd.DataFrame]) -> pd.DataFrame:
"""
Convert a pandas object into a pd.DataFrame.
"""
if isinstance(obj, pd.Series):
df = pd.DataFrame(obj)
else:
df = obj
dbg.dassert_isinstance(df, pd.DataFrame)
return df
def cast_to_series(obj: Union[pd.Series, pd.DataFrame]) -> pd.Series:
"""
Convert a pandas object into a pd.Series.
"""
if isinstance(obj, pd.DataFrame):
dbg.dassert_eq(obj.shape[1], 1)
srs = obj.iloc[:, 0]
else:
srs = obj
dbg.dassert_isinstance(srs, pd.Series)
return srs
# TODO(gp): Need to be tested.
def adapt_to_series(f: Callable) -> Callable:
"""
Extend a function working on dataframes so that it can work on series.
"""
def wrapper(
obj: Union[pd.Series, pd.DataFrame], *args: Any, **kwargs: Any
) -> Any:
# Convert a pd.Series to a pd.DataFrame.
was_series = False
if isinstance(obj, pd.Series):
obj = pd.DataFrame(obj)
was_series = True
dbg.dassert_isinstance(obj, pd.DataFrame)
# Apply the function.
res = f(obj, *args, **kwargs)
# Transform the output, if needed.
if was_series:
if isinstance(res, tuple):
res_obj, res_tmp = res[0], res[1:]
res_obj_srs = cast_to_series(res_obj)
res_obj_srs = [res_obj_srs]
res_obj_srs.extend(res_tmp)
res = tuple(res_obj_srs)
else:
res = cast_to_series(res)
return res
return wrapper
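# Illustrative usage sketch, not part of the original module: `adapt_to_series`
# lets a function written for pd.DataFrame also accept a pd.Series. The helper
# `_demean_df` below is a hypothetical example, not an existing function.
@adapt_to_series
def _demean_df(df: pd.DataFrame) -> pd.DataFrame:
    # Subtract the column-wise mean from each column.
    return df - df.mean()


def _example_adapt_to_series() -> pd.Series:
    srs = pd.Series([1.0, 2.0, 3.0], name="x")
    # The wrapper converts the series to a one-column df, applies `_demean_df`,
    # and converts the single-column result back to a series.
    return _demean_df(srs)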
# #############################################################################
# Pandas helpers.
# #############################################################################
def drop_axis_with_all_nans(
df: pd.DataFrame,
drop_rows: bool = True,
drop_columns: bool = False,
drop_infs: bool = False,
report_stats: bool = False,
) -> pd.DataFrame:
"""
Remove columns and rows not containing information (e.g., with only nans).
The operation is not performed in place and the resulting df is returned.
Assume that the index is timestamps.
:param df: data frame to process
:param drop_rows: remove rows with only nans
:param drop_columns: remove columns with only nans
:param drop_infs: remove also +/- np.inf
:param report_stats: report the stats of the operations
"""
dbg.dassert_isinstance(df, pd.DataFrame)
if drop_infs:
df = df.replace([np.inf, -np.inf], np.nan)
if drop_columns:
# Remove columns with all nans, if any.
cols_before = df.columns[:]
df = df.dropna(axis=1, how="all")
if report_stats:
# Report results.
cols_after = df.columns[:]
removed_cols = set(cols_before).difference(set(cols_after))
pct_removed = hprint.perc(
len(cols_before) - len(cols_after), len(cols_before)
)
_LOG.info(
"removed cols with all nans: %s %s",
pct_removed,
hprint.list_to_str(removed_cols),
)
if drop_rows:
# Remove rows with all nans, if any.
rows_before = df.index[:]
df = df.dropna(axis=0, how="all")
if report_stats:
# Report results.
rows_after = df.index[:]
removed_rows = set(rows_before).difference(set(rows_after))
if len(rows_before) == len(rows_after):
# Nothing was removed.
min_ts = max_ts = None
else:
# TODO(gp): Report as intervals of dates.
min_ts = min(removed_rows)
max_ts = max(removed_rows)
pct_removed = hprint.perc(
len(rows_before) - len(rows_after), len(rows_before)
)
_LOG.info(
"removed rows with all nans: %s [%s, %s]",
pct_removed,
min_ts,
max_ts,
)
return df
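# Illustrative usage sketch, not part of the original module: drop the rows and
# columns of a timestamp-indexed df that contain only NaNs and log the stats.
def _example_drop_axis_with_all_nans() -> pd.DataFrame:
    idx = pd.date_range("2021-01-01", periods=3, freq="D")
    df = pd.DataFrame(
        {"a": [1.0, np.nan, 3.0], "b": [np.nan, np.nan, np.nan]}, index=idx
    )
    # Column "b" is all NaNs and is dropped; after that, the middle row is all
    # NaNs and is dropped as well.
    return drop_axis_with_all_nans(
        df, drop_rows=True, drop_columns=True, report_stats=True
    )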
def drop_na(
df: pd.DataFrame,
drop_infs: bool = False,
report_stats: bool = False,
*args: Any,
**kwargs: Any,
) -> pd.DataFrame:
"""
Wrapper around pd.dropna() reporting information about the removed rows.
"""
dbg.dassert_isinstance(df, pd.DataFrame)
num_rows_before = df.shape[0]
if drop_infs:
df = df.replace([np.inf, -np.inf], np.nan)
df = df.dropna(*args, **kwargs)
if report_stats:
num_rows_after = df.shape[0]
pct_removed = hprint.perc(
num_rows_before - num_rows_after, num_rows_before
)
_LOG.info("removed rows with nans: %s", pct_removed)
return df
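# Illustrative usage sketch, not part of the original module: drop rows with
# any NaN (treating +/- inf as NaN) and report how many rows were removed.
def _example_drop_na() -> pd.DataFrame:
    df = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [np.inf, 5.0, 6.0]})
    # Only the last row has neither a NaN nor an inf, so only it survives.
    return drop_na(df, drop_infs=True, report_stats=True)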
def report_zero_nan_inf_stats(
df: pd.DataFrame,
zero_threshold: float = 1e-9,
verbose: bool = False,
as_txt: bool = False,
) -> pd.DataFrame:
"""
Report count and percentage about zeros, nans, infs for a df.
"""
df = cast_to_df(df)
_LOG.info("index in [%s, %s]", df.index.min(), df.index.max())
#
num_rows = df.shape[0]
_LOG.info("num_rows=%s", hprint.thousand_separator(num_rows))
_LOG.info("data=")
display_df(df, max_lines=5, as_txt=as_txt)
#
num_days = len(set(df.index.date))
_LOG.info("num_days=%s", num_days)
#
num_weekdays = len(set(d for d in df.index.date if d.weekday() < 5))
_LOG.info("num_weekdays=%s", num_weekdays)
#
stats_df = pd.DataFrame(None, index=df.columns)
if False:
# Find the index of the first non-nan value.
df = df.applymap(lambda x: not np.isnan(x))
min_idx = df.idxmax(axis=0)
min_idx.name = "min_idx"
# Find the index of the last non-nan value.
max_idx = df.reindex(index=df.index[::-1]).idxmax(axis=0)
max_idx.name = "max_idx"
stats_df["num_rows"] = num_rows
#
num_zeros = (np.abs(df) < zero_threshold).sum(axis=0)
if verbose:
stats_df["num_zeros"] = num_zeros
stats_df["zeros [%]"] = (100.0 * num_zeros / num_rows).apply(
hprint.round_digits
)
#
num_nans = np.isnan(df).sum(axis=0)
if verbose:
stats_df["num_nans"] = num_nans
stats_df["nans [%]"] = (100.0 * num_nans / num_rows).apply(
hprint.round_digits
)
#
num_infs = np.isinf(df).sum(axis=0)
if verbose:
stats_df["num_infs"] = num_infs
stats_df["infs [%]"] = (100.0 * num_infs / num_rows).apply(
hprint.round_digits
)
#
num_valid = df.shape[0] - num_zeros - num_nans - num_infs
if verbose:
stats_df["num_valid"] = num_valid
stats_df["valid [%]"] = (100.0 * num_valid / num_rows).apply(
hprint.round_digits
)
#
display_df(stats_df, as_txt=as_txt)
return stats_df
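# Illustrative usage sketch, not part of the original module (it assumes that
# `display_df()`, called above, is defined elsewhere in this module): summarize
# how many zeros / NaNs / infs each column of a timestamp-indexed df contains.
def _example_report_zero_nan_inf_stats() -> pd.DataFrame:
    idx = pd.date_range("2021-01-01", periods=4, freq="D")
    df = pd.DataFrame({"x": [0.0, 1.0, np.nan, np.inf]}, index=idx)
    return report_zero_nan_inf_stats(df, verbose=True)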
def drop_duplicates(
df: pd.DataFrame, subset: Optional[List[str]] = None
) -> pd.DataFrame:
"""
Wrapper around pd.drop_duplicates() reporting information about the removed
rows.
:param df: Df to drop duplicates from.
:param subset: Columns subset.
:return: Df without duplicates inside the given columns subset.
"""
if not subset:
subset = df.columns
num_rows_before = df.shape[0]
df_no_duplicates = df.drop_duplicates(subset=subset)
num_rows_after = df_no_duplicates.shape[0]
pct_removed = hprint.perc(num_rows_before - num_rows_after, num_rows_before)
_LOG.info("Removed duplicated rows: %s", pct_removed)
return df_no_duplicates
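# Illustrative usage sketch, not part of the original module: drop rows that
# are duplicated within a subset of columns and log the removal percentage.
def _example_drop_duplicates() -> pd.DataFrame:
    df = pd.DataFrame({"id": [1, 1, 2], "val": [10, 20, 30]})
    # The first two rows share the same "id", so only the first one is kept.
    return drop_duplicates(df, subset=["id"])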
# #############################################################################
# Column variability.
# #############################################################################
def _get_unique_elements_in_column(df: pd.DataFrame, col_name: str) -> List[Any]:
try:
vals = df[col_name].unique()
except TypeError:
# TypeError: unhashable type: 'list'
_LOG.error("Column '%s' has unhashable types", col_name)
vals = list(set(map(str, df[col_name])))
return cast(List[Any], vals)
def _get_variable_cols(
df: pd.DataFrame, threshold: int = 1
) -> Tuple[List[str], List[str]]:
"""
Split the columns of a df into those with more than <threshold> unique values
(variable) and those with at most <threshold> unique values (constant).
:return: (variable columns, constant columns)
"""
var_cols = []
const_cols = []
for col_name in df.columns:
unique_elems = _get_unique_elements_in_column(df, col_name)
num_unique_elems = len(unique_elems)
if num_unique_elems <= threshold:
const_cols.append(col_name)
else:
var_cols.append(col_name)
return var_cols, const_cols
def remove_columns_with_low_variability(
df: pd.DataFrame, threshold: int = 1, log_level: int = logging.DEBUG
) -> pd.DataFrame:
"""
Remove columns of a df that contain at most <threshold> unique values.
:return: df with only columns with sufficient variability
"""
var_cols, const_cols = _get_variable_cols(df, threshold=threshold)
_LOG.log(log_level, "# Constant cols")
for col_name in const_cols:
unique_elems = _get_unique_elements_in_column(df, col_name)
_LOG.log(
log_level,
" %s: %s",
col_name,
hprint.list_to_str(list(map(str, unique_elems))),
)
_LOG.log(log_level, "# Var cols")
_LOG.log(log_level, hprint.list_to_str(var_cols))
return df[var_cols]
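# Illustrative usage sketch, not part of the original module: remove columns
# with at most one unique value and log which columns were dropped.
def _example_remove_columns_with_low_variability() -> pd.DataFrame:
    df = pd.DataFrame({"const": ["a", "a", "a"], "var": [1, 2, 3]})
    # "const" has a single unique value and is removed; "var" is kept.
    return remove_columns_with_low_variability(
        df, threshold=1, log_level=logging.INFO
    )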
def print_column_variability(
df: pd.DataFrame,
max_num_vals: int = 3,
num_digits: int = 2,
use_thousands_separator: bool = True,
) -> pd.DataFrame:
"""
Print statistics about the values in each column of a data frame.
This is useful to get a sense of which columns are interesting.
"""
print(("# df.columns=%s" % hprint.list_to_str(df.columns)))
res = []
for c in tauton.tqdm(df.columns, desc="Computing column variability"):
vals = _get_unique_elements_in_column(df, c)
try:
min_val = min(vals)
except TypeError as e:
_LOG.debug("Column='%s' reported %s", c, e)
min_val = "nan"
try:
max_val = max(vals)
except TypeError as e:
_LOG.debug("Column='%s' reported %s", c, e)
max_val = "nan"
if len(vals) <= max_num_vals:
txt = ", ".join(map(str, vals))
else:
txt = ", ".join(map(str, [min_val, "...", max_val]))
row = ["%20s" % c, len(vals), txt]
res.append(row)
res = pd.DataFrame(res, columns=["col_name", "num", "elems"])
res.sort_values("num", inplace=True)
# TODO(gp): Fix this.
# res = add_count_as_idx(res)
res = add_pct(
res,
"num",
df.shape[0],
"[diff %]",
num_digits=num_digits,
use_thousands_separator=use_thousands_separator,
)
res.reset_index(drop=True, inplace=True)
return res
def add_pct(
df: pd.DataFrame,
col_name: str,
total: int,
dst_col_name: str,
num_digits: int = 2,
use_thousands_separator: bool = True,
) -> pd.DataFrame:
"""
Add to df a column "dst_col_name" storing the percentage of values in
column "col_name" with respect to "total". The rest of the parameters are
the same as hprint.round_digits().
:return: updated df
"""
# Add column with percentage right after col_name.
pos_col_name = df.columns.tolist().index(col_name)
df.insert(pos_col_name + 1, dst_col_name, (100.0 * df[col_name]) / total)
# Format.
df[col_name] = [
hprint.round_digits(
v, num_digits=None, use_thousands_separator=use_thousands_separator
)
for v in df[col_name]
]
df[dst_col_name] = [
hprint.round_digits(
v, num_digits=num_digits, use_thousands_separator=False
)
for v in df[dst_col_name]
]
return df
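# Illustrative worked example, not part of the original module: add a "[%]"
# column expressing "num" as a percentage of a total of 200.
def _example_add_pct() -> pd.DataFrame:
    df = pd.DataFrame({"col_name": ["a", "b"], "num": [50, 150]})
    # The "[%]" column is inserted right after "num" and holds 25.0 and 75.0,
    # formatted by hprint.round_digits().
    return add_pct(df, "num", 200, "[%]", num_digits=1)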
# #############################################################################
# Pandas data structure stats.
# #############################################################################
# TODO(gp): Explain what this is supposed to do.
def breakdown_table(
df: pd.DataFrame,
col_name: str,
num_digits: int = 2,
use_thousands_separator: bool = True,
verbosity: bool = False,
) -> pd.DataFrame:
if isinstance(col_name, list):
for c in col_name:
print(("\n" + hprint.frame(c).rstrip("\n")))
res = breakdown_table(df, c)
print(res)
return None
#
if verbosity:
print(("# col_name=%s" % col_name))
first_col_name = df.columns[0]
numbers.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that an array with two `null` values is returned if any
value is such a value.
:return: An array containing the minimum and maximum values for the specified numbers. The first element is
the minimum, the second element is the maximum. If the input array is empty both elements are set to
`null`.
"""
return process('extrema', data=data, ignore_nodata=ignore_nodata)
def filter_bands(data, bands=UNSET, wavelengths=UNSET) -> ProcessBuilder:
"""
Filter the bands by name
:param data: A data cube with bands.
:param bands: A list of band names. Either the unique band name (metadata field `name` in bands) or one of
the common band names (metadata field `common_name` in bands). If unique band name and common name
conflict, the unique band name has higher priority. The order of the specified array defines the order of
the bands in the data cube. If multiple bands match a common name, all matched bands are included in the
original order.
:param wavelengths: A list of sub-lists with each sub-list consisting of two elements. The first element is
the minimum wavelength and the second element is the maximum wavelength. Wavelengths are specified in
micrometres (μm). The order of the specified array defines the order of the bands in the data cube. If
multiple bands match the wavelengths, all matched bands are included in the original order.
:return: A data cube limited to a subset of its original bands. The dimensions and dimension properties
(name, type, labels, reference system and resolution) remain unchanged, except that the dimension of type
`bands` has less (or the same) dimension labels.
"""
return process('filter_bands', data=data, bands=bands, wavelengths=wavelengths)
def filter_bbox(data, extent) -> ProcessBuilder:
"""
Spatial filter using a bounding box
:param data: A data cube.
:param extent: A bounding box, which may include a vertical axis (see `base` and `height`).
:return: A data cube restricted to the bounding box. The dimensions and dimension properties (name, type,
labels, reference system and resolution) remain unchanged, except that the spatial dimensions have less (or
the same) dimension labels.
"""
return process('filter_bbox', data=data, extent=extent)
def filter_labels(data, condition, dimension, context=UNSET) -> ProcessBuilder:
"""
Filter dimension labels based on a condition
:param data: A data cube.
:param condition: A condition that is evaluated against each dimension label in the specified dimension. A
dimension label and the corresponding data is preserved for the given dimension, if the condition returns
`true`.
:param dimension: The name of the dimension to filter on. Fails with a `DimensionNotAvailable` error if the
specified dimension does not exist.
:param context: Additional data to be passed to the condition.
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged, except that the given dimension has less (or the same) dimension
labels.
"""
return process('filter_labels', data=data, condition=condition, dimension=dimension, context=context)
def filter_spatial(data, geometries) -> ProcessBuilder:
"""
Spatial filter using geometries
:param data: A data cube.
:param geometries: One or more geometries used for filtering, specified as GeoJSON.
:return: A data cube restricted to the specified geometries. The dimensions and dimension properties (name,
type, labels, reference system and resolution) remain unchanged, except that the spatial dimensions have
less (or the same) dimension labels.
"""
return process('filter_spatial', data=data, geometries=geometries)
def filter_temporal(data, extent, dimension=UNSET) -> ProcessBuilder:
"""
Temporal filter for a temporal interval
:param data: A data cube.
:param extent: Left-closed temporal interval, i.e. an array with exactly two elements: 1. The first
element is the start of the temporal interval. The specified instance in time is **included** in the
interval. 2. The second element is the end of the temporal interval. The specified instance in time is
**excluded** from the interval. The specified temporal strings follow [RFC
3339](https://tools.ietf.org/html/rfc3339). Also supports open intervals by setting one of the boundaries
to `null`, but never both.
:param dimension: The name of the temporal dimension to filter on. If the dimension is not set or is set to
`null`, the filter applies to all temporal dimensions. Fails with a `DimensionNotAvailable` error if the
specified dimension does not exist.
:return: A data cube restricted to the specified temporal extent. The dimensions and dimension properties
(name, type, labels, reference system and resolution) remain unchanged, except that the given temporal
dimension(s) have less (or the same) dimension labels.
"""
return process('filter_temporal', data=data, extent=extent, dimension=dimension)
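# Illustrative sketch, not part of the generated module: the processes above
# compose into a process graph by passing the result of one call as the `data`
# argument of the next. The collection id, extents and band names below are
# placeholder values.
def _example_filter_chain() -> ProcessBuilder:
    cube = load_collection(
        id="SENTINEL2_L2A",  # placeholder collection id
        spatial_extent={"west": 5.0, "south": 50.0, "east": 6.0, "north": 51.0},
        temporal_extent=["2021-01-01", "2022-01-01"],
    )
    cube = filter_temporal(cube, extent=["2021-06-01", "2021-07-01"])
    return filter_bands(cube, bands=["B04", "B08"])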
def first(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
First element
:param data: An array with elements of any data type.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if the first value is such a
value.
:return: The first element of the input array.
"""
return process('first', data=data, ignore_nodata=ignore_nodata)
def floor(x) -> ProcessBuilder:
"""
Round fractions down
:param x: A number to round down.
:return: The number rounded down.
"""
return process('floor', x=x)
def gt(x, y) -> ProcessBuilder:
"""
Greater than comparison
:param x: First operand.
:param y: Second operand.
:return: `true` if `x` is strictly greater than `y` or `null` if any operand is `null`, otherwise `false`.
"""
return process('gt', x=x, y=y)
def gte(x, y) -> ProcessBuilder:
"""
Greater than or equal to comparison
:param x: First operand.
:param y: Second operand.
:return: `true` if `x` is greater than or equal to `y`, `null` if any operand is `null`, otherwise `false`.
"""
return process('gte', x=x, y=y)
def if_(value, accept, reject=UNSET) -> ProcessBuilder:
"""
If-Then-Else conditional
:param value: A boolean value.
:param accept: A value that is returned if the boolean value is `true`.
:param reject: A value that is returned if the boolean value is **not** `true`. Defaults to `null`.
:return: Either the `accept` or `reject` argument depending on the given boolean value.
"""
return process('if', value=value, accept=accept, reject=reject)
def int(x) -> ProcessBuilder:
"""
Integer part of a number
:param x: A number.
:return: Integer part of the number.
"""
return process('int', x=x)
def is_nan(x) -> ProcessBuilder:
"""
Value is not a number
:param x: The data to check.
:return: `true` if the data is not a number, otherwise `false`
"""
return process('is_nan', x=x)
def is_nodata(x) -> ProcessBuilder:
"""
Value is not a no-data value
:param x: The data to check.
:return: `true` if the data is a no-data value, otherwise `false`
"""
return process('is_nodata', x=x)
def is_valid(x) -> ProcessBuilder:
"""
Value is valid data
:param x: The data to check.
:return: `true` if the data is valid, otherwise `false`.
"""
return process('is_valid', x=x)
def last(data, ignore_nodata=UNSET) -> ProcessBuilder:
"""
Last element
:param data: An array with elements of any data type.
:param ignore_nodata: Indicates whether no-data values are ignored or not. Ignores them by default. Setting
this flag to `false` considers no-data values so that `null` is returned if the last value is such a value.
:return: The last element of the input array.
"""
return process('last', data=data, ignore_nodata=ignore_nodata)
def linear_scale_range(x, inputMin, inputMax, outputMin=UNSET, outputMax=UNSET) -> ProcessBuilder:
"""
Linear transformation between two ranges
:param x: A number to transform. The number gets clipped to the bounds specified in `inputMin` and
`inputMax`.
:param inputMin: Minimum value the input can obtain.
:param inputMax: Maximum value the input can obtain.
:param outputMin: Minimum value of the desired output range.
:param outputMax: Maximum value of the desired output range.
:return: The transformed number.
"""
return process('linear_scale_range', x=x, inputMin=inputMin, inputMax=inputMax, outputMin=outputMin, outputMax=outputMax)
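# Illustrative sketch, not part of the generated module: a plain-Python version
# of the linear mapping described above, assuming the usual rescaling formula
# and output-range defaults of 0 and 1 when the output bounds are not given.
def _example_linear_scale_range(x, input_min, input_max, output_min=0.0, output_max=1.0):
    # Clip the input to [input_min, input_max] first, as described above.
    if x < input_min:
        x = input_min
    elif x > input_max:
        x = input_max
    return (x - input_min) / (input_max - input_min) * (output_max - output_min) + output_min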
def ln(x) -> ProcessBuilder:
"""
Natural logarithm
:param x: A number to compute the natural logarithm for.
:return: The computed natural logarithm.
"""
return process('ln', x=x)
def load_collection(id, spatial_extent, temporal_extent, bands=UNSET, properties=UNSET) -> ProcessBuilder:
"""
Load a collection
:param id: The collection id.
:param spatial_extent: Limits the data to load from the collection to the specified bounding box or
polygons. The process puts a pixel into the data cube if the point at
# Create data agreement didcomm crud transaction record
data_agreement_crud_didcomm_transaction_record = DataAgreementCRUDDIDCommTransaction(
thread_id=read_data_agreement_message._thread_id,
message_type=DataAgreementCRUDDIDCommTransaction.MESSAGE_TYPE_READ_DATA_AGREEMENT,
messages_list=[read_data_agreement_message.serialize()],
connection_id=self.context.connection_record.connection_id,
)
await data_agreement_crud_didcomm_transaction_record.save(self.context)
try:
# Fetch the data agreement instance metadata
data_agreement_instance_metadata_records = await self.query_data_agreement_instance_metadata(
tag_query={
'data_agreement_id': read_data_agreement_message.body.data_agreement_id,
}
)
# Check if there is a data agreement instance metadata record
if not data_agreement_instance_metadata_records:
self._logger.info(
"Data agreement not found; Failed to process read-data-agreement message data agreement: %s",
read_data_agreement_message.body.data_agreement_id,
)
# send problem report
problem_report = DataAgreementProblemReport(
from_did=read_data_agreement_message.to_did,
to_did=read_data_agreement_message.from_did,
created_time=str(
int(datetime.datetime.utcnow().timestamp())),
problem_code=DataAgreementProblemReportReason.DATA_AGREEMENT_NOT_FOUND.value,
explain=f"Data agreement not found; Failed to process read-data-agreement message data agreement: {read_data_agreement_message.body.data_agreement_id}",
)
problem_report.assign_thread_id(
thid=read_data_agreement_message._thread_id
)
# Update data agreement crud didcomm transaction record with response message
data_agreement_crud_didcomm_transaction_record.messages_list.append(
problem_report.serialize()
)
await data_agreement_crud_didcomm_transaction_record.save(self.context)
if responder:
await responder.send_reply(problem_report, connection_id=receipt.connection_id)
return None
if len(data_agreement_instance_metadata_records) > 1:
self._logger.info(
"Duplicate data agreement records found; Failed to process read-data-agreement message data agreement: %s",
read_data_agreement_message.body.data_agreement_id,
)
# send problem report
problem_report = DataAgreementProblemReport(
from_did=read_data_agreement_message.to_did,
to_did=read_data_agreement_message.from_did,
created_time=str(
int(datetime.datetime.utcnow().timestamp())),
problem_code=DataAgreementProblemReportReason.READ_DATA_AGREEMENT_FAILED.value,
explain=f"Duplicate data agreement records found; Failed to process read-data-agreement message data agreement: {read_data_agreement_message.body.data_agreement_id}",
)
problem_report.assign_thread_id(
thid=read_data_agreement_message._thread_id
)
# Update data agreement crud didcomm transaction record with response message
data_agreement_crud_didcomm_transaction_record.messages_list.append(
problem_report.serialize()
)
await data_agreement_crud_didcomm_transaction_record.save(self.context)
if responder:
await responder.send_reply(problem_report, connection_id=receipt.connection_id)
return None
data_agreement_instance_metadata_record: StorageRecord = data_agreement_instance_metadata_records[
0]
# Identify the method of use
if data_agreement_instance_metadata_record.tags.get("method_of_use") == DataAgreementV1Record.METHOD_OF_USE_DATA_SOURCE:
# If method of use is data-source
# Fetch exchange record (credential exchange if method of use is "data-source")
tag_filter = {}
post_filter = {
"data_agreement_id": read_data_agreement_message.body.data_agreement_id
}
records = await V10CredentialExchange.query(self.context, tag_filter, post_filter)
if not records:
self._logger.info(
"Credential exchange record not found; Failed to process read-data-agreement message data agreement: %s",
read_data_agreement_message.body.data_agreement_id,
)
# send problem report
problem_report = DataAgreementProblemReport(
from_did=read_data_agreement_message.to_did,
to_did=read_data_agreement_message.from_did,
created_time=str(
int(datetime.datetime.utcnow().timestamp())),
problem_code=DataAgreementProblemReportReason.READ_DATA_AGREEMENT_FAILED.value,
explain=f"Credential exchange record not found; Failed to process read-data-agreement message data agreement: {read_data_agreement_message.body.data_agreement_id}",
)
problem_report.assign_thread_id(
thid=read_data_agreement_message._thread_id
)
# Update data agreement crud didcomm transaction record with response message
data_agreement_crud_didcomm_transaction_record.messages_list.append(
problem_report.serialize()
)
await data_agreement_crud_didcomm_transaction_record.save(self.context)
if responder:
await responder.send_reply(problem_report, connection_id=receipt.connection_id)
return None
if len(records) > 1:
self._logger.info(
"Duplicate credential exchange records found; Failed to process read-data-agreement message data agreement: %s",
read_data_agreement_message.body.data_agreement_id,
)
# send problem report
problem_report = DataAgreementProblemReport(
from_did=read_data_agreement_message.to_did,
to_did=read_data_agreement_message.from_did,
created_time=str(
int(datetime.datetime.utcnow().timestamp())),
problem_code=DataAgreementProblemReportReason.READ_DATA_AGREEMENT_FAILED.value,
explain=f"Duplicate credential exchange records found; Failed to process read-data-agreement message data agreement: {read_data_agreement_message.body.data_agreement_id}",
)
problem_report.assign_thread_id(
thid=read_data_agreement_message._thread_id
)
# Update data agreement crud didcomm transaction record with response message
data_agreement_crud_didcomm_transaction_record.messages_list.append(
problem_report.serialize()
)
await data_agreement_crud_didcomm_transaction_record.save(self.context)
if responder:
await responder.send_reply(problem_report, connection_id=receipt.connection_id)
return None
cred_ex_record: V10CredentialExchange = records[0]
# Construct data agreement instance
data_agreement_instance: DataAgreementInstance = DataAgreementInstanceSchema(
).load(cred_ex_record.data_agreement)
# Construct response message
read_data_agreement_response_message = ReadDataAgreementResponse(
from_did=read_data_agreement_message.to_did,
to_did=read_data_agreement_message.from_did,
created_time=str(
int(datetime.datetime.utcnow().timestamp())),
body=ReadDataAgreementResponseBody(
data_agreement=data_agreement_instance
)
)
read_data_agreement_response_message.assign_thread_id(
thid=read_data_agreement_message._thread_id
)
# Update data agreement crud didcomm transaction record with response message
data_agreement_crud_didcomm_transaction_record.messages_list.append(
read_data_agreement_response_message.serialize()
)
await data_agreement_crud_didcomm_transaction_record.save(self.context)
if responder:
await responder.send_reply(read_data_agreement_response_message, connection_id=receipt.connection_id)
return None
if data_agreement_instance_metadata_record.tags.get("method_of_use") == DataAgreementV1Record.METHOD_OF_USE_DATA_USING_SERVICE:
# If method of use is data-using-service
# Fetch exchange record (presentation exchange if method of use is "data-using-service")
tag_filter = {}
post_filter = {
"data_agreement_id": read_data_agreement_message.body.data_agreement_id
}
records = await V10PresentationExchange.query(self.context, tag_filter, post_filter)
if not records:
self._logger.info(
"Presentation exchange record not found; Failed to process read-data-agreement message data agreement: %s",
read_data_agreement_message.body.data_agreement_id,
)
# send problem report
problem_report = DataAgreementProblemReport(
from_did=read_data_agreement_message.to_did,
to_did=read_data_agreement_message.from_did,
created_time=str(
int(datetime.datetime.utcnow().timestamp())),
problem_code=DataAgreementProblemReportReason.READ_DATA_AGREEMENT_FAILED.value,
explain=f"Presentation exchange record not found; Failed to process read-data-agreement message data agreement: {read_data_agreement_message.body.data_agreement_id}",
)
problem_report.assign_thread_id(
thid=read_data_agreement_message._thread_id
)
# Update data agreement crud didcomm transaction record with response message
data_agreement_crud_didcomm_transaction_record.messages_list.append(
problem_report.serialize()
)
await data_agreement_crud_didcomm_transaction_record.save(self.context)
if responder:
await responder.send_reply(problem_report, connection_id=receipt.connection_id)
return None
if len(records) > 1:
self._logger.info(
"Duplicate presentation exchange records found; Failed to process read-data-agreement message data agreement: %s",
read_data_agreement_message.body.data_agreement_id,
)
# send problem report
problem_report = DataAgreementProblemReport(
from_did=read_data_agreement_message.to_did,
to_did=read_data_agreement_message.from_did,
created_time=str(
int(datetime.datetime.utcnow().timestamp())),
problem_code=DataAgreementProblemReportReason.READ_DATA_AGREEMENT_FAILED.value,
explain=f"Duplicate presentation exchange records found; Failed to process read-data-agreement message data agreement: {read_data_agreement_message.body.data_agreement_id}",
)
problem_report.assign_thread_id(
thid=read_data_agreement_message._thread_id
)
# Update data agreement crud didcomm transaction record with response message
data_agreement_crud_didcomm_transaction_record.messages_list.append(
problem_report.serialize()
)
await data_agreement_crud_didcomm_transaction_record.save(self.context)
if responder:
await responder.send_reply(problem_report, connection_id=receipt.connection_id)
return None
pres_ex_record: V10PresentationExchange = records[0]
# Construct data agreement instance
data_agreement_instance: DataAgreementInstance = DataAgreementInstanceSchema(
).load(pres_ex_record.data_agreement)
# Construct response message
read_data_agreement_response_message = ReadDataAgreementResponse(
from_did=read_data_agreement_message.to_did,
to_did=read_data_agreement_message.from_did,
created_time=str(
int(datetime.datetime.utcnow().timestamp())),
body=ReadDataAgreementResponseBody(
data_agreement=data_agreement_instance
)
)
read_data_agreement_response_message.assign_thread_id(
thid=read_data_agreement_message._thread_id
)
# Update data agreement crud didcomm transaction record with response message
data_agreement_crud_didcomm_transaction_record.messages_list.append(
read_data_agreement_response_message.serialize()
)
await data_agreement_crud_didcomm_transaction_record.save(self.context)
if responder:
await responder.send_reply(read_data_agreement_response_message, connection_id=receipt.connection_id)
return None
except (ADAManagerError, StorageError) as e:
# send problem report
problem_report = DataAgreementProblemReport(
from_did=read_data_agreement_message.to_did,
to_did=read_data_agreement_message.from_did,
created_time=str(
int(datetime.datetime.utcnow().timestamp())),
problem_code=DataAgreementProblemReportReason.READ_DATA_AGREEMENT_FAILED.value,
explain=str(e)
)
problem_report.assign_thread_id(
thid=read_data_agreement_message._thread_id
)
# Update data agreement crud didcomm transaction record with response message
data_agreement_crud_didcomm_transaction_record.messages_list.append(
problem_report.serialize()
)
await data_agreement_crud_didcomm_transaction_record.save(self.context)
if responder:
await responder.send_reply(problem_report, connection_id=receipt.connection_id)
async def fetch_data_agreement_crud_didcomm_transactions_from_wallet(self):
try:
return await DataAgreementCRUDDIDCommTransaction.query(
self.context,
)
except StorageSearchError as e:
raise ADAManagerError(
f"Failed to fetch data agreement CRUD DIDComm transactions from wallet: {e}"
)
async def process_read_data_agreement_response_message(self, read_data_agreement_response_message: ReadDataAgreementResponse, receipt: MessageReceipt):
try:
# Fetch Data Agreement crud txn from wallet using the thread_id of the message
da_crud_didcomm_txn = await DataAgreementCRUDDIDCommTransaction.retrieve_by_tag_filter(
self.context,
{"thread_id": read_data_agreement_response_message._thread_id}
)
# Update the txn record with response message
da_crud_didcomm_txn.messages_list.append(
read_data_agreement_response_message.to_json())
await da_crud_didcomm_txn.save(self.context)
except (StorageNotFoundError, StorageDuplicateError):
pass
async def query_data_agreements_in_wallet(self, tag_filter: dict = None, include_fields: typing.List[str] = []) -> typing.List[DataAgreementV1Record]:
"""
Query data agreements in the wallet.
"""
try:
# If template_version is provided, then data agreements with that version will be returned
# The publish_flag is not required for this query
template_version = None
if "template_version" in tag_filter:
template_version = tag_filter["template_version"]
tag_filter.pop("template_version", None)
# If delete_flag is not provided, it defaults to false.
# This ensures only the current (non-deleted) version of each data agreement is returned, draft or not,
# i.e. version (1) of a data agreement won't be returned if version (2) is available.
if "delete_flag" not in tag_filter:
tag_filter["delete_flag"] = bool_to_str(False)
self._logger.info(
f"Query data agreements in wallet with tag_filter: {tag_filter}")
# Query data agreements from the wallet
data_agreement_v1_records: typing.List[DataAgreementV1Record] = await DataAgreementV1Record.query(
self.context,
tag_filter=tag_filter
)
# Filter data agreements by template_version
if template_version:
data_agreement_v1_records = [
data_agreement_v1_record for data_agreement_v1_record in data_agreement_v1_records
if data_agreement_v1_record.data_agreement.get("template_version") == int(template_version)
]
return data_agreement_v1_records, ([] if not data_agreement_v1_records else self.serialize_data_agreement_record(data_agreement_records=data_agreement_v1_records, is_list=True, include_fields=include_fields))
except StorageSearchError as e:
# Raise an error
raise ADAManagerError(
f"Failed to fetch all data agreements from wallet: {e}"
)
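# Illustrative call sketch (not part of the original module): assumes `manager`
# is an initialised instance of this manager class and that only the
# template_version tag is being filtered on.
#
#   records, serialized = await manager.query_data_agreements_in_wallet(
#       tag_filter={"template_version": "1"},
#       include_fields=["data_agreement"],
#   )
#   # `records` holds DataAgreementV1Record objects; `serialized` is [] when
#   # nothing matched, otherwise the serialised view limited to include_fields.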
async def delete_data_agreement_in_wallet(self, data_agreement_id: str):
"""
Delete data agreement in wallet
"""
try:
# Query for the data agreement by id
data_agreement_record: DataAgreementV1Record = await DataAgreementV1Record.retrieve_non_deleted_data_agreement_by_id(
self.context,
data_agreement_id
)
# Mark the data agreement as deleted
data_agreement_record._delete_flag = True
# Save the data agreement record
await data_agreement_record.save(self.context)
except StorageError as err:
# Raise an error
raise ADAManagerError(
f"Failed to delete data agreement; Reason: {err.roll_up}"
)
async def create_and_store_da_personal_data_in_wallet(self, personal_data: DataAgreementPersonalData, da_template_id: str, da_template_version: int) -> DataAgreementPersonalDataRecord:
"""
Create and store personal data in the wallet.
"""
restrictions = []
if personal_data.restrictions:
for restriction in personal_data.restrictions:
restrictions.append(restriction.serialize())
new_personal_data_record = DataAgreementPersonalDataRecord(
attribute_name=personal_data.attribute_name,
attribute_category=personal_data.attribute_category,
attribute_sensitive="true" if personal_data.attribute_sensitive else "false",
attribute_description=personal_data.attribute_description,
restrictions=restrictions,
da_template_id=da_template_id,
da_template_version=da_template_version,
)
await new_personal_data_record.save(self.context)
return new_personal_data_record
async def list_da_personal_data_category_from_wallet(self) -> typing.List[str]:
"""
List personal data category in the wallet.
"""
try:
# Query for the old data agreement record by id
personal_data_records: typing.List[DataAgreementPersonalDataRecord] = await DataAgreementPersonalDataRecord.query(
self.context,
)
# Generate a list of category
personal_data_category_list = [
personal_data_record.attribute_category for personal_data_record in personal_data_records]
# Remove duplicates
personal_data_category_list = list(
set(personal_data_category_list))
return personal_data_category_list
except StorageSearchError as e:
# Raise an error
raise ADAManagerError(
f"Failed to fetch all data agreements from wallet: {e}"
)
async def mark_connection_id_as_auditor(self, connection_record: ConnectionRecord):
"""Associate the connection with Auditor"""
assert connection_record.connection_id, "Connection ID is required"
try:
# Fetch storage from context
storage: IndyStorage = await self.context.inject(BaseStorage)
# Search for existing connection_id marked as Auditor
connection_record_list = await storage.search_records(
self.RECORD_TYPE_AUDITOR_CONNECTION,
{"connection_id": connection_record.connection_id},
).fetch_all()
# If no record found, create a new one
if not connection_record_list:
record = StorageRecord(
self.RECORD_TYPE_AUDITOR_CONNECTION,
connection_record.connection_id,
{"connection_id": connection_record.connection_id},
)
await storage.add_record(record)
else:
# Update the existing record with the new connection_id
record = connection_record_list[0]
await storage.update_record_value(record=record, value=connection_record.connection_id)
await storage.update_record_tags(record=record, tags={"connection_id": connection_record.connection_id})
except StorageError as e:
# Raise an error
raise ADAManagerError(
f"Failed to mark connection as Auditor: {e}"
)
except StorageDuplicateError as e:
# Raise an error
raise ADAManagerError(
f"Failed to mark connection as Auditor: {e}"
| |
<filename>wmt-shared-task/segment-level/segment_level_prism.py
f"""
Shell script tho reproduce results for BERTScores in data from WMT18/19 Metrics Shared task.
"""
import argparse
import hashlib
import logging
import os
import sys
from typing import Any, Dict, Iterator, List
import numpy as np
import pandas as pd
import sentencepiece as spm
import torch
from tqdm import tqdm
from fairseq import utils
from fairseq import checkpoint_utils
from fairseq.data import LanguagePairDataset
logger = logging.getLogger('prism')
logger.setLevel(logging.INFO)
MODELS = {
'8412b2044da4b9b2c0a8ce87b305d0d1': {
'name': 'm39v1',
'path': 'todo',
'date': '2020-04-30',
'description': 'model released with arXiv paper April 2020',
'langs': ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'eo', 'fi', 'fr', 'he',
'hr', 'hu', 'id', 'it', 'ja', 'kk', 'lt', 'lv', 'mk', 'nl', 'no', 'pl', 'pt', 'ro', 'ru',
'sk', 'sl', 'sq', 'sr', 'sv', 'tr', 'uk', 'vi', 'zh'],
}
}
def hash_model(model_dir):
md5 = hashlib.md5()
block_size = 2 ** 20
for fname in ('checkpoint.pt', 'spm.model', 'dict.src.txt', 'dict.tgt.txt'):
with open(os.path.join(model_dir, fname), "rb") as f:
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
md5.digest()
return md5.hexdigest()
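# Illustrative sketch (not part of the original script): the digest returned by
# hash_model() is used to look up model metadata in MODELS, e.g. for a
# hypothetical local copy of the released model directory:
#
#   model_hash = hash_model("m39v1")   # md5 over checkpoint, spm model and dicts
#   if model_hash in MODELS:
#       print(MODELS[model_hash]["name"])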
"""
Copy of https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_scorer.py
with softmax temperature control added
"""
class SequenceScorer(object):
"""Scores the target for a given source sentence."""
def __init__(self, tgt_dict, softmax_batch=None, temperature=1.0):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos()
self.softmax_batch = softmax_batch or sys.maxsize
self.temperature = temperature
assert self.softmax_batch > 0
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample['net_input']
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample['target']
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in models:
model.eval()
decoder_out = model.forward(**net_input)
attn = decoder_out[1]
if type(attn) is dict:
attn = attn.get('attn', None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
for bd, tgt, is_single in batched:
sample['target'] = tgt
# divide the logits by temperature prior to softmax
# for example, see https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_generator.py:
# decoder_out[0][:, -1:, :].div_(temperature)
bd[0].div_(self.temperature)
curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt)
probs[idx:end] = tgt_probs.view(-1)
idx = end
sample['target'] = orig_target
probs = probs.view(sample['target'].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None and torch.is_tensor(attn):
attn = attn.data
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \
if sample['target'] is not None else None
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_attn is not None:
avg_attn_i = avg_attn[i]
alignment = utils.extract_hard_alignment(avg_attn_i, sample['net_input']['src_tokens'][i],
sample['target'][i], self.pad, self.eos)
else:
avg_attn_i = alignment = None
hypos.append([{
'tokens': ref,
'score': score_i,
'attention': avg_attn_i,
'alignment': alignment,
'positional_scores': avg_probs_i,
}])
return hypos
class Prism:
def __init__(self, model_dir, lang, temperature):
'''
model_dir should contain:
1) checkpoint.pt: the fairseq model
2) spm.model: the sentencepiece model
3) dict.src.txt: the fairseq source dictionary
4) dict.tgt.txt: the fairseq target dictionary (likely a copy of the source)
lang: ISO 639-1 Code (e.g. "en"). Must be a language compatible with the model.
'''
self.sp = spm.SentencePieceProcessor()
self.sp.Load(model_dir + '/spm.model')
self.lang = lang
self.temperature = temperature
# this prints things and I can't figure out how to disable it
sys.stdout = open(os.devnull, 'w')
self.models, self.args, self.task = checkpoint_utils.load_model_ensemble_and_task(
[model_dir + '/checkpoint.pt', ],
arg_overrides=dict(data=model_dir + '/'),
)
sys.stdout = sys.__stdout__
self.use_cuda = torch.cuda.is_available()
self.generator = SequenceScorer(self.task.target_dictionary, temperature=temperature)
for model in self.models:
if self.use_cuda:
model.cuda()
model.make_generation_fast_(
beamable_mm_beam_size=None,
need_attn=False,
)
# if model.args.fp16:
# model.half()
# hash model
self.model_hash = hash_model(model_dir)
if self.model_hash in MODELS:
model_langs = MODELS[self.model_hash]['langs']
if lang not in model_langs:
model_name = MODELS[self.model_hash]['name']
logger.warning(f'Language "{lang}" is unsupported for model "{model_name}"')
logger.warning(f'Supported languages for {model_name}: {", ".join(model_langs)}')
else:
logger.warning('unrecognized model, so cannot check language')
def identifier(self):
if self.model_hash in MODELS:
model_name = MODELS[self.model_hash]['name']
else:
logger.warning('unrecognized model, using hash to identify')
model_name = self.model_hash
return dict(version='0.1', model=model_name, seg_scores='avg_log_prob',
sys_scores='avg_log_prob', log_base=2, temperature=self.temperature)
def _binarize(self, sentence: str) -> torch.LongTensor:
return self.task.source_dictionary.encode_line(sentence, add_if_not_exist=False).long()
def _encode(self, sent, prepend=True):
sent = ' '.join(self.sp.EncodeAsPieces(sent))
if prepend:
sent = f'<{self.lang}> ' + sent
return self._binarize(sent)
def _build_batches(self,
source_tokens: List[List[int]],
target_tokens: List[List[int]],
skip_invalid_size_inputs: bool) -> Iterator[Dict[str, Any]]:
source_lengths = torch.LongTensor([t.numel() for t in source_tokens])
target_lengths = torch.LongTensor([t.numel() for t in target_tokens])
batch_iterator = self.task.get_batch_iterator(
dataset=LanguagePairDataset(source_tokens, source_lengths, self.task.source_dictionary,
tgt=target_tokens, tgt_sizes=target_lengths,
tgt_dict=self.task.target_dictionary),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=(2000, 2000), # ???
ignore_invalid_inputs=skip_invalid_size_inputs,
).next_epoch_itr(shuffle=False)
return batch_iterator
def _score_forward(self, tok_sents_in, tok_sents_out):
assert len(tok_sents_in) == len(tok_sents_out)
tok_level_scores = [None, ] * len(tok_sents_in) # for debug
results = [None, ] * len(tok_sents_in)
for batch in self._build_batches(tok_sents_in, tok_sents_out, skip_invalid_size_inputs=False):
if self.use_cuda: # must be a better way
batch['id'] = batch['id'].cuda()
batch['net_input']['src_tokens'] = batch['net_input']['src_tokens'].cuda()
batch['net_input']['src_lengths'] = batch['net_input']['src_lengths'].cuda()
batch['net_input']['prev_output_tokens'] = batch['net_input']['prev_output_tokens'].cuda()
batch['target'] = batch['target'].cuda()
translations = self.task.inference_step(self.generator, self.models, batch)
ids = batch['id'].cpu().numpy()
tok_scores = [x[0]['positional_scores'].cpu().numpy() for x in translations]
# [1:] to skip language tag log prob
sent_scores = [np.mean(x[1:]) for x in tok_scores]
for _id, sent_score, _tok_score in zip(ids, sent_scores, tok_scores):
results[_id] = sent_score
tok_level_scores[_id] = _tok_score
if logger.level == logging.DEBUG:
for ii, (sent_in, scores_out, sent_out) in enumerate(zip(tok_sents_in, tok_level_scores, tok_sents_out)):
sent_in_str = ' '.join([self.task.source_dictionary[x] for x in sent_in])
logger.debug(f'Input[{ii}] = ' + sent_in_str)
sent_out_tok = [self.task.source_dictionary[x] for x in sent_out]
logger.debug(f'Output[{ii}] = ' + \
f' '.join([f'{a}[{b:.02f}]' for a, b in zip(sent_out_tok, scores_out)]))
if None in results:
raise Exception('Missing one or more sentence scores')
return np.array(results)
def score(self, cand, ref=None, src=None, segment_scores=False):
if not (ref is None) ^ (src is None):
raise Exception('Must provide exactly one of "ref" or "src"')
tokenized_cand = [self._encode(sentence, prepend=False) for sentence in cand]
tokenized_cand_prep = [self._encode(sentence, prepend=True) for sentence in cand]
if src is not None:
# Prism-src: score candidate given the source
if len(cand) != len(src):
raise Exception(f'Length of cand ({len(cand)}) does not match length of src ({len(src)})')
tokenized_src = [self._encode(sentence, prepend=False) for sentence in src]
scores = self._score_forward(tokenized_src, tokenized_cand_prep)
else:
# Prism-ref: average candidate given reference and reference given candidate
if len(cand) != len(ref):
raise Exception(f'Length of cand ({len(cand)}) does not match length of ref ({len(ref)})')
tokenized_ref = [self._encode(sentence, prepend=False) for sentence in ref]
tokenized_ref_prep = [self._encode(sentence, prepend=True) for sentence in ref]
forward_scores = self._score_forward(tok_sents_in=tokenized_ref, tok_sents_out=tokenized_cand_prep)
reverse_scores = self._score_forward(tok_sents_in=tokenized_cand, tok_sents_out=tokenized_ref_prep)
scores = 0.5 * forward_scores + 0.5 * reverse_scores
if not segment_scores:
scores = np.mean(scores)
return scores
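# Minimal usage sketch (assumption: a local copy of the m39v1 model directory is
# available; not part of the original script):
#
#   prism = Prism(model_dir="m39v1", lang="en", temperature=1.0)
#   seg_scores = prism.score(cand=["The cat sat."], ref=["A cat was sitting."],
#                            segment_scores=True)   # one avg log-prob per segment
#   sys_score = prism.score(cand=["The cat sat."], ref=["A cat was sitting."])
#   # Prism-src variant: pass src= instead of ref= to score without a reference.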
def compute_kendall(
hyp1_scores: list, hyp2_scores: list, dataframe: pd.DataFrame
) -> float:
""" Computes the official WMT19 shared task Kendall correlation score. """
assert len(hyp1_scores) == len(hyp2_scores) == len(dataframe)
conc, disc = 0, 0
for i, row in tqdm(dataframe.iterrows(), total=len(dataframe), desc="Kendall eval..."):
if hyp1_scores[i] > hyp2_scores[i]:
conc += 1
else:
disc += 1
return (conc - disc) / (conc + disc)
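# Worked example (illustrative only): over three relative-ranking pairs where
# hyp1 outscores hyp2 twice and not the third time, conc = 2 and disc = 1, so
# the Kendall-like score is (2 - 1) / (2 + 1) ≈ 0.33.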
def run_prism(mt: list, ref: list, language=False, temperature=1.0) -> list:
prism = Prism(model_dir="m39v1", lang=language, temperature=temperature)
scores = prism.score(cand=mt, ref=ref, segment_scores=True)
return list(scores)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Evaluates BERTScores against relative preferences."
)
parser.add_argument(
"--test_path",
default="wmt-metrics/wmt19/de-en/relative-ranks.csv",
help="Path to the test dataframe with relative preferences.",
type=str,
)
parser.add_argument(
"--language", default="en", help="Target language of the testset.", type=str,
)
parser.add_argument(
'--temperature',
type=float,
default=1.0,
help='Softmax temperature: values >1.0 produce more uniform samples and values <1.0 produce sharper samples')
parser.add_argument(
"--run_wmt18",
default=False,
help="Runs entire WMT18 evaluation.",
action="store_true",
)
parser.add_argument(
"--run_wmt19",
default=False,
help="Runs entire WMT19 evaluation.",
action="store_true",
)
args = parser.parse_args()
if args.run_wmt18:
lps = [
"en-cs",
"en-de",
"en-et",
"en-fi",
"en-ru",
"en-tr",
"en-zh",
"cs-en",
"de-en",
"et-en",
"fi-en",
"ru-en",
"tr-en",
"zh-en",
]
kendall_scores = {}
for lp in lps:
data = pd.read_csv(f"wmt-metrics/wmt18/{lp}/relative-ranks.csv")
hyp1_scores = run_prism([str(s) | |
########################################################################################
# Copyright 2019-2021 <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
########################################################################################
"""Tests for the :mod:`ci_exec.parsers.cmake_parser` module."""
from itertools import chain
from typing import Tuple
from ci_exec.parsers.cmake_parser import CMakeParser
from ci_exec.parsers.utils import env_or_platform_default
from ci_exec.utils import unset_env
import pytest
def default_cc_cxx() -> Tuple[str, str]:
"""Return the default ``(cc, cxx)`` for the current platform."""
cc = env_or_platform_default(
env="CC", windows="cl.exe", darwin="clang", other="gcc"
)
cxx = env_or_platform_default(
env="CXX", windows="cl.exe", darwin="clang++", other="g++"
)
return (cc, cxx)
def test_cmake_parser_is_x_config_generator():
"""
Validate |is_single_config_generator| and |is_multi_config_generator|.
.. |is_single_config_generator| replace::
:func:`~ci_exec.parsers.cmake_parser.CMakeParser.is_single_config_generator`
.. |is_multi_config_generator| replace::
:func:`~ci_exec.parsers.cmake_parser.CMakeParser.is_multi_config_generator`
"""
for g in chain(CMakeParser.makefile_generators, CMakeParser.ninja_generator):
assert CMakeParser.is_single_config_generator(g)
assert not CMakeParser.is_multi_config_generator(g)
for g in chain(CMakeParser.visual_studio_generators, CMakeParser.other_generators,
CMakeParser.ninja_multi_generator):
assert not CMakeParser.is_single_config_generator(g)
assert CMakeParser.is_multi_config_generator(g)
@unset_env("CC", "CXX")
def test_cmake_parser_defaults():
"""Validate the |CMakeParser| defaults are as expected."""
parser = CMakeParser()
args = parser.parse_args([])
assert args.generator == "Ninja"
assert args.architecture is None
assert args.toolset is None
assert not args.shared
assert not args.static
cc, cxx = default_cc_cxx()
assert args.cc == cc
assert args.cxx == cxx
assert args.build_type == "Release"
expected_configure_args = {
"-G", "Ninja",
f"-DCMAKE_C_COMPILER={cc}",
f"-DCMAKE_CXX_COMPILER={cxx}",
"-DCMAKE_BUILD_TYPE=Release"
}
assert set(args.cmake_configure_args) == expected_configure_args
assert len(args.cmake_build_args) == 0
def test_cmake_parser_add_argument_failures():
"""
Validate |cm_add_argument| fails with expected names.
.. |cm_add_argument| replace::
:func:`~ci_exec.parsers.cmake_parser.CMakeParser.add_argument`
"""
parser = CMakeParser()
with pytest.raises(ValueError) as ve_excinfo:
parser.add_argument("cmake_configure_args")
assert str(ve_excinfo.value) == "'cmake_configure_args' name is reserved."
with pytest.raises(ValueError) as ve_excinfo:
parser.add_argument("cmake_build_args")
assert str(ve_excinfo.value) == "'cmake_build_args' name is reserved."
with pytest.raises(ValueError) as ve_excinfo:
parser.add_argument("extra_args")
assert str(ve_excinfo.value) == \
"'extra_args' is reserved. Set `add_extra_args = False` first."
parser.add_extra_args = False
parser.add_argument("extra_args") # OK
@unset_env("CC", "CXX")
def test_cmake_parser_get_argument():
"""
Validate |get_argument| finds both flag and dest names.
.. |get_argument| replace::
:func:`~ci_exec.parsers.cmake_parser.CMakeParser.get_argument`
"""
parser = CMakeParser()
parser.add_argument("-f", "--flag", dest="flag", action="store_true")
parser.add_argument("positional", type=str, nargs=1)
# Run through default options added.
assert parser.get_argument("-G").default == "Ninja"
assert parser.get_argument("generator").default == "Ninja"
assert parser.get_argument("-A").default is None
assert parser.get_argument("architecture").default is None
assert parser.get_argument("-T").default is None
assert parser.get_argument("toolset").default is None
assert not parser.get_argument("--shared").default
assert not parser.get_argument("shared").default
assert not parser.get_argument("--static").default
assert not parser.get_argument("static").default
cc, cxx = default_cc_cxx()
assert parser.get_argument("--cc").default == cc
assert parser.get_argument("cc").default == cc
assert parser.get_argument("--cxx").default == cxx
assert parser.get_argument("cxx").default == cxx
assert parser.get_argument("--build-type").default == "Release"
assert parser.get_argument("build_type").default == "Release"
# Run through options user added.
assert not parser.get_argument("-f").default
assert not parser.get_argument("--flag").default
assert not parser.get_argument("flag").default
assert parser.get_argument("positional").nargs == 1
# None should be returned when argument not found.
assert parser.get_argument("--not-here") is None
@unset_env("CC", "CXX")
def test_cmake_parser_remove():
"""
Validate |remove| can remove registered arguments (except for generator).
.. |remove| replace::
:func:`~ci_exec.parsers.cmake_parser.CMakeParser.remove`
"""
parser = CMakeParser()
# Cannot remove generator.
for args in ["-G"], ["generator"], ["-G", "generator"]:
with pytest.raises(ValueError) as ve_excinfo:
parser.remove(*args)
assert str(ve_excinfo.value) == "'generator' argument may not be removed."
# extra_args is added in parse_args, must be prevented (nothing to remove).
with pytest.raises(ValueError) as ve_excinfo:
parser.remove("extra_args")
assert str(ve_excinfo.value) == (
"'extra_args' cannot be removed, it must be prevented. Set "
"`add_extra_args = False`."
)
# Unregistered arguments cannot be removed.
with pytest.raises(ValueError) as ve_excinfo:
parser.remove("foo")
assert str(ve_excinfo.value) == "Cannot remove unregistered arg(s): ['foo']"
with pytest.raises(ValueError) as ve_excinfo:
parser.remove("foo", "shared", "bar") # removes shared (!)
assert str(ve_excinfo.value) == "Cannot remove unregistered arg(s): ['foo', 'bar']"
# Test removing items and make sure parse_args doesn't include them.
flag_to_dest = {
"-G": "generator",
"-A": "architecture",
"-T": "toolset",
"--shared": "shared",
"--static": "static",
"--cc": "cc",
"--cxx": "cxx",
"--build-type": "build_type"
}
# Test removing one at a time by flags.
flags = [f for f in flag_to_dest]
parser = CMakeParser(add_extra_args=False)
while len(flags) > 1:
f = flags.pop(0)
if f == "-G":
flags.append(f)
continue
# Parse args, make sure the attribute was set.
args = parser.parse_args([])
assert hasattr(args, flag_to_dest[f])
assert all(hasattr(args, flag_to_dest[fl]) for fl in flags)
assert set(parser.flag_map.keys()) == set(flags + [f])
assert len(parser.flag_map) == len(parser.dest_map)
# Remove arg, make sure it is gone now but others still remain.
parser.remove(f)
args = parser.parse_args([])
assert not hasattr(args, flag_to_dest[f])
assert all(hasattr(args, flag_to_dest[fl]) for fl in flags)
assert set(parser.flag_map.keys()) == set(flags)
assert len(parser.flag_map) == len(parser.dest_map)
assert flags == ["-G"] # testing the test...
# Test removing one at a time by dests.
dests = [val for _, val in flag_to_dest.items()]
parser = CMakeParser(add_extra_args=False)
while len(dests) > 1:
d = dests.pop(0)
if d == "generator":
dests.append(d)
continue
# Parse args, make sure the attribute was set.
args = parser.parse_args([])
assert hasattr(args, d)
assert all(hasattr(args, de) for de in dests)
assert set(parser.dest_map.keys()) == set(dests + [d])
assert len(parser.dest_map) == len(parser.flag_map)
# Remove arg, make sure it is gone now but others still remain.
parser.remove(d)
args = parser.parse_args([])
assert not hasattr(args, d)
assert all(hasattr(args, de) for de in dests)
assert set(parser.dest_map.keys()) == set(dests)
assert len(parser.dest_map) == len(parser.flag_map)
assert dests == ["generator"] # testing the test...
# Remove all flags but generator at once.
parser = CMakeParser(add_extra_args=False)
args = parser.parse_args([])
assert all(hasattr(args, dest) for _, dest in flag_to_dest.items())
parser.remove(*[flag for flag in flag_to_dest if flag != "-G"])
args = parser.parse_args([])
for _, dest in flag_to_dest.items():
if dest == "generator":
assert hasattr(args, dest)
else:
assert not hasattr(args, dest)
# Remove all dests but generator at once.
parser = CMakeParser(add_extra_args=False)
args = parser.parse_args([])
assert all(hasattr(args, dest) for _, dest in flag_to_dest.items())
parser.remove(*[dest for _, dest in flag_to_dest.items() if dest != "generator"])
args = parser.parse_args([])
for _, dest in flag_to_dest.items():
if dest == "generator":
assert hasattr(args, dest)
else:
assert not hasattr(args, dest)
@unset_env("CC", "CXX")
def test_cmake_parser_set_argument():
"""
Validate |set_argument| can set supported attributes.
.. |set_argument| replace::
:func:`~ci_exec.parsers.cmake_parser.CMakeParser.set_argument`
"""
parser = CMakeParser()
# Test that unsupported attributes are failed out.
for kwargs in {"action": "store_false"}, {"nargs": 2, "type": list}:
with pytest.raises(ValueError) as ve_excinfo:
parser.set_argument("shared", **kwargs)
why = str(ve_excinfo.value)
assert why.startswith(f"Setting attribute{'' if len(kwargs) == 1 else 's'}")
# NOTE: str.format(set) used, can't assume order so just check `in`.
for key in kwargs:
assert key in why
assert why.endswith("not supported.")
# Changing generator choices is not supported.
for arg in "-G", "generator":
with pytest.raises(ValueError) as ve_excinfo:
parser.set_argument(arg, help="GeNeRaToR", choices={"meh"})
assert str(ve_excinfo.value) == \
"Changing 'generator' attribute 'choices' is not supported."
# Cannot set value of argument that does not exist...
with pytest.raises(ValueError) as ve_excinfo:
parser.set_argument("blargh", help="BLARGH")
assert str(ve_excinfo.value) == "Cannot set attrs of 'blargh', argument not found."
# Change just one attribute of one argument.
generator = parser.get_argument("generator")
assert generator.default == "Ninja"
parser.set_argument("generator", default="Unix Makefiles")
assert generator.default == "Unix Makefiles"
# Change multiple attributes of a different argument.
build_type = parser.get_argument("build_type")
assert build_type.default == "Release"
assert set(build_type.choices) == \
{"Release", "MinSizeRel", "RelWithDebInfo", "Debug"}
parser.set_argument("--build-type", choices={"Release", "Debug"}, default="Debug")
assert build_type.default == "Debug"
assert set(build_type.choices) == {"Release", "Debug"}
@unset_env("CC", "CXX")
def test_cmake_parser_extra_args():
"""
Validate |add_extra_args| works as described.
.. |add_extra_args| replace::
:attr:`~ci_exec.parsers.cmake_parser.CMakeParser.add_extra_args`
"""
parser = CMakeParser()
cc, cxx = default_cc_cxx()
base_configure_args = [
"-G", "Ninja",
f"-DCMAKE_C_COMPILER={cc}",
f"-DCMAKE_CXX_COMPILER={cxx}",
"-DCMAKE_BUILD_TYPE=Release"
]
# No extra args given.
args = parser.parse_args([])
assert args.cmake_configure_args == base_configure_args
assert args.cmake_build_args == []
# Extra args with nothing else set.
args = parser.parse_args(["--", "-DMYLIB_OPTION=ON", "-Werr=dev"])
assert args.cmake_configure_args == base_configure_args + [
"-DMYLIB_OPTION=ON", "-Werr=dev"
]
assert args.cmake_build_args == []
# Extra args with explicit args given.
args = parser.parse_args([
"-G", "Ninja",
"--cc", cc,
"--cxx", cxx,
"--build-type", "Release",
"--", "-DMYLIB_OPTION=OFF", "-DCMAKE_CXX_FLAGS=-Werror"
])
assert args.cmake_configure_args == base_configure_args + [
"-DMYLIB_OPTION=OFF", "-DCMAKE_CXX_FLAGS=-Werror"
]
assert args.cmake_build_args == []
@unset_env("CC", "CXX")
def test_cmake_parser_shared_or_static(capsys):
"""Validate ``--shared`` and ``--static`` |CMakeParser| options."""
def validate_shared_and_or_static(parser: CMakeParser):
"""Verify -DBUILD_SHARED_LIBS is correct with --shared vs --static."""
# Specifying --shared: -DBUILD_SHARED_LIBS=ON
args = parser.parse_args(["--shared"])
assert args.shared
assert not args.static
assert "-DBUILD_SHARED_LIBS=ON" in args.cmake_configure_args
# Specifying --static: -DBUILD_SHARED_LIBS=OFF
args = parser.parse_args(["--static"])
assert args.static
assert not args.shared
assert "-DBUILD_SHARED_LIBS=OFF" in args.cmake_configure_args
# Specifying both: error
with pytest.raises(SystemExit) as se_excinfo:
args = parser.parse_args(["--shared", "--static"])
assert se_excinfo.value.code == 2
captured = capsys.readouterr()
assert captured.out == ""
assert "argument | |
-0.17805460405137996], [2.5278812537929483, -3.9412959574596886,
-0.1516720332141368, -2.385490768218621], [2.0634420046107511, 3.7303717046166387, -3.7967716979561583,
-0.36579638919369373]], [[-0.89677692789005903, -0.33159623329473931, -2.0783805922287799, 3.3237758621528677],
[1.8764406996924805, 3.8567013916314448, 2.4876054261100879, -3.122046411865298], [-3.9505368448428069,
-3.9474451391708176, 0.76222063661286921, 2.065165407462576]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[1.0592728936584552, 0.032560898684288464, -8.5292561305554226, -0.14093526530513639],
[-0.93515800400241478, 19.099335624075504, -0.079617164548639499, -0.42831638863356958], [-10.388720575054998,
-12.647489860286688, -0.81523713423442645, 7.0950099465605785]], [[-2.9363681584872698, -6.6229310429171893,
4.1311530815811848, -8.0573135077187672], [-6.584787059098673, 14.508295027869941, 18.496797973906915,
-0.27692236086049277], [6.3553678588617721, 6.2328500602902022, 0.75108843274136661, 2.0647742706124967]]],
[[[2.0193173850265365, -4.4161377189515036, -19.851925322587327, 7.0412805631986641], [0.35744616523310901,
-6.7429729026590115, -4.8680306428853894, 0.94115996924851042], [12.129087268589474, -5.0059436486027034,
-0.033217382128710969, -1.9073712167890253]], [[-10.815413171058387, 14.028808362935354, -10.515302901880693,
0.99242267641760229], [-6.3626775794058616, -5.8343695859208742, -2.2365267016703343, -7.6675145471157151],
[13.600601636143761, -0.6644698681936374, -9.1974441340729562, 10.10920321894068]]], [[[5.7665796814044832,
-0.029950178836461127, 2.3694584183777816, -0.57180774483350039], [6.1932109200372141, -10.938622839032124,
0.71067144357861733, 5.2777750094174012], [-3.2852098569207846, -17.529448442415831, -7.1618400125346513,
0.90435962385794955]], [[-1.4913107362062921, 1.2593646828759113, 5.76422972394219, -2.1425757917908261],
[-1.6652158722689767, 9.6775449400565687, 3.7948150260119111, -7.0074506472637719], [4.5830800930287374,
5.2719361490107728, -0.76732802340590534, 7.4467689202114986]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(3.142013671,self.functionspace)
arg0.setTaggedValue(1,-2.04077395087)
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-0.905206509275)
sub=res.substitute({arg1:s1})
ref=Data(-2.84417122722,self.functionspace)
ref.setTaggedValue(1,1.84732186428)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(1.54368119889,self.functionspace)
arg0.setTaggedValue(1,-0.973182859739)
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([-0.97051483006179051, -4.8243289242685101])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-1.4981654964063673, -7.4472258576349226]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.94448839773843207, 4.6949542188401541]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(0.576275206322,self.functionspace)
arg0.setTaggedValue(1,-0.446417285252)
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[-3.7798020411794298, 2.8125776443752777, -0.74593700484018655, 4.9042983986909512,
-1.1062378936297144], [1.3747147971013396, 1.7312150406230939, -2.4865059810459189, -3.9444781957615138,
-4.8713070674060148], [4.7677542872819085, -0.65669250050514094, -2.2966507465733335, 4.6331137703181184,
-4.2587467390331817], [-2.2122452558031123, -0.89486317692759698, -2.7263171047505361, 1.4136050574112167,
1.5057522304514919]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-2.1782062011384205, 1.6208187623100703, -0.42986500136777495, 2.8262255715721345,
-0.63749747039309623], [0.79221405333400008, 0.99765630472347699, -1.4329117472490631, -2.2731049860965866,
-2.8072134853290578], [2.7475385855977827, -0.37843560621895822, -1.3235028828319906, 2.6699485939051231,
-2.4542101557111002], [-1.2748620912236397, -0.5156874619142493, -1.5711089520403427, 0.81462554611800553,
0.86772767727381395]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.6873689660148408, -1.255583276563631, 0.33299917267007023,
-2.189363577211358, 0.49384371731752186], [-0.61369644771829823, -0.77284431862310299, 1.1100192498224006,
1.7608832478891809, 2.1746356766622736], [-2.1284079256789306, 0.29315888332112217, 1.0252645914581893,
-2.0683020716109741, 1.9011781578167486], [0.98758452140814967, 0.39948239011636527, 1.2170750806399457,
-0.63105773214859318, -0.6721938229809169]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(4.6954921918,self.functionspace)
arg0.setTaggedValue(1,3.80656545201)
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[3.3283491776040073, -3.9068878738203718], [-2.1695978858355423, -2.2223735496995447]],
[[-2.3211489651914921, 4.272069872491878], [1.636342469753175, -4.2787938517786497]], [[2.7410635950334186,
-3.5668158773147507], [-1.0064480730166228, 1.389332564769]], [[0.77463712529690731, -0.94041585508240466],
[3.6978341544417166, -2.6780892355753592]], [[-4.4954676727861065, -4.2409706282499835], [2.3785224394198679,
-4.1039517994892138]], [[-2.0175257524312817, 0.85038925666007348], [-3.2277420742959917, -3.855794844823607]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[15.628237575034362, -18.344761505774329], [-10.187329932293613, -10.43513764988427]],
[[-10.89893684206883, 20.05947072912349], [7.6834332898420676, -20.091043121362375]], [[12.870642707715545,
-16.747956101531248], [-4.7257690683049729, 6.5236002096908132]], [[3.6373025733125388, -4.4157153045874011],
[17.36315139876443, -12.574947094596416]], [[-21.10843335607106, -19.913444470614692], [11.168333542324792,
-19.270073630038475]], [[-9.4732764173030475, 3.9929961146407393], [-15.155837707011633,
-18.104854587064445]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[12.669578991701924, -14.871824405371543], [-8.258716356981127,
-8.4596103757528684]], [[-8.8356054598727027, 16.261913585211406], [6.2288447130233893, -16.28750885246448]],
[[10.434037982623636, -13.5773180922763], [-3.8311104639897189, 5.2885853424057174]], [[2.948706919001554,
-3.5797545044815582], [14.07604773957002, -10.194321961547963]], [[-17.112291933867024, -16.143532276496508],
[9.0540013447323737, -15.621961136660534]], [[-7.6798438277506191, 3.2370623651649013], [-12.286611468022571,
-14.677335446353782]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-1.72281700023,self.functionspace)
arg0.setTaggedValue(1,1.23448641864)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0*arg1
s1=numpy.array([[[[-4.3265612601968471, 4.9346403281714331, 2.8024200919119817, -3.9056671936664311],
[0.98069732637570617, -2.2852413242790757, 3.2075463029671312, -2.6966078512789116], [-2.1601129611240619,
-2.6503532815304762, -4.675750160645002, 3.2350739199006568]], [[-4.1760984392537086, 4.3345400423125842,
2.3141216950646779, 0.60673873055732486], [3.6895192429599195, -1.0430965175426432, 0.33936966744652075,
-4.9652989404647769], [0.016939166262534222, -3.5215478761207564, 0.96881594277756378, 2.4707123930500092]]],
[[[-4.0598585401879825, 0.32726568454206451, -3.8317591404661555, -4.8432615549786364], [-1.8707032325346216,
0.11786029243200069, -1.2644962697725761, 4.5016381310909193], [1.0052891428203132, 3.5573702004465542,
0.94853515124922705, -3.266716026917611]], [[4.4268917686602247, 1.7045644573811822, -4.2672635941058026,
4.4735466129490451], [-3.3659634968161098, -3.7740307778271154, -0.23936175808445981, 1.638694221507726],
[-2.6562820856857803, -1.8386899346245853, -3.8721446565337256, 2.2142808663189424]]], [[[-4.9689140219050429,
3.0036100506068504, 1.7161971518176031, 1.2296325439044953], [-4.2017528414854652, -1.8394187611478952,
-4.4722717389932569, -2.3151891625454821], [1.0583223957426515, 4.9808003293003509, -0.20896133794562566,
-3.9944246041361611]], [[-3.3354149131160451, 1.5689046088326091, 1.0657585673339192, -2.4003243575280555],
[0.12124021598431511, -1.1303400850693057, -1.9271523374197388, -1.7678094654193863], [1.3900959283471721,
1.5973269294693555, 3.1820328180383193, 0.020208485606988624]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[7.4538732915852082, -8.5014822473732448, -4.8280569761201289, 6.7287498384724653],
[-1.6895620259559927, 3.9370526030863635, -5.5260152997629799, 4.6457618491254991], [3.7214793318324864,
4.5660736900247683, 8.0554618655674215, -5.5734403461917594]], [[7.1946533857624484, -7.4676192730552966,
-3.9868081968486213, -1.045299799699541], [-6.3563664744313337, 1.7970644132987275, -0.58467183243782017,
8.554301425835547], [-0.029183083606744185, 6.06698254808967, -1.6690925763069098, -4.2565853134149645]]],
[[[6.9943933115474914, -0.56381888491958132, 6.601419787965443, 8.344053343456924], [3.2228793313878823,
-0.20305171545342732, 2.1784956702862215, -7.7554987011078413], [-1.7319292253931926, -6.1286978574257542,
-1.6341524838838573, 5.6279539060835297]], [[-7.626724397207207, -2.9366526251568561, 7.3517142643698472,
-7.7071021560908708], [5.7989391344540557, 6.5019643834157081, 0.41237650603182713, -2.8231702629851867],
[4.5762879346145349, 3.1677262775151811, 6.6709966416095421, -3.8148007197688423]]], [[[8.5605295495980389,
-5.1746704572343623, -2.9566936288903536, -2.1184318506694821], [7.2388512260579478, 3.1689819122397545,
7.7049057815666941, 3.988647247971739], [-1.823295815105068, -8.5810074820485838, 0.36000214540263886,
6.8816626141257302]], [[5.7463095151227659, -2.702935531829326, -1.8361069779390997, 4.1353196092052489],
[-0.20887470520881798, 1.9473691145940035, 3.3201308089314878, 3.0456122001844874], [-2.3948808973010833,
-2.7519019890081795, -5.4820602341926197, -0.034815522552537087]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-5.3410811151064834, 6.0917464659774998, 3.4595495427759313,
-4.8214931062905171], [1.2106575302027247, -2.8210993781265854, 3.9596723481566771, -3.3289257687890399],
[-2.66663011322566, -3.2718251306347788, -5.7721500702479123, 3.9936548173985087]], [[-5.1553368061426292,
5.3509308132656104, 2.8567518036265622, 0.74901072253301881], [4.554661396727484, -1.2876884842321341,
0.41894724535949618, -6.1295941064678363], [0.020911170694103174, -4.3473030256449352, 1.1959901235162222,
3.0500608935740701]]], [[[-5.0118402294424564, 0.40400504285255151, -4.7302546183870042, -5.9789406115194401],
[-2.3093577338610776, 0.14549693030368552, -1.5610034714491989, 5.5572111344423751], [1.2410157936131636,
4.39152519850908, 1.1709537618153174, -4.0327165687679427]], [[5.4649377651792932, 2.104261672325459,
-5.2678789516603199, 5.5225325368173852], [-4.1552362224415518, -4.6589897387390575, -0.29548883949592541,
2.0229457607473758], [-3.2791441588432502, -2.2698377523754134, -4.7801099894819963, 2.7334996565146623]]],
[[[-6.1340568754081053, 3.7079158143505366, 2.1186220756193546, 1.5179646753620302], [-5.1870068172759387,
-2.2707374788199806, -5.5209587222334928, -2.85806957773395], [1.3064846240818391, 6.1487303604553372,
-0.25795993371372866, -4.9310629240686339]], [[-4.1175244107552791, 1.9367914317381283, 1.3156644769179004,
-2.963167819687802], [0.14966940002504314, -1.3953894834570877, -2.3790433871858725, -2.1823367757951],
[1.7160545441447639, 1.9718784005502887, 3.9281762975200034, 0.024947101023013681]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([-0.099233059085104713, 4.771977048069223]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([3.9729085267773208, 4.512809517509826]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(0.26176969234)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-0.025976207346631468, 1.249158963725014]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0399870427481233, 1.1813167589860505]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([0.84702689091359229, -3.3372769586299422]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([-2.152707415414048, 1.9005183627662312]))
arg1=Symbol(shape=(2,))
res=arg0*arg1
s1=numpy.array([0.22148437875716098, 4.0581595354793194])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([0.18760322472460655, -13.543202312199522]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.47679106454891407, 7.7126067162133252]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[-4.9957974390113735, 4.9127660926149055, -0.033400607153987849, -4.5745875540405283,
-3.40034229393263], [4.6311740546030773, 4.0795994583149682, 4.8540687237153293, 4.9306788508967045,
3.1060981817064288], [-1.3242874361820456, -3.3839454855009707, 0.088505407790738566, -4.4328915815516297,
-3.0958370529970693], [-2.3333608177639089, -4.3040231210385214, 4.1339174077369343, -4.5703847879440351,
-3.3406709387044389]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[-0.78277733029185015, 4.3160080804422201, -3.0818130197239957,
1.7055665928942068, -3.3364799279772583], [4.5669273627829092, 3.6704066897391243, 2.6983979089447621,
-1.2237350853460538, -0.1257348607090929], [2.1891872096029914, -0.7503980382583979, 1.9746042593444724,
-2.0584330310875232, -0.7673935307397155], [-0.23746062225782705, 2.8663010003293437, 3.8185722602896526,
4.8671017855990222, -1.9042813962136051]]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-0.123633480243)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[0.61764782397173934, -0.60738236964752024, 0.0041294333046613622, 0.56557217998023424,
0.4203961518147894], [-0.57256816597952853, -0.50437507902705625, -0.60012540964938543, -0.60959698629464243,
-0.38401772817938795], [0.16372626457665548, 0.41836895732351315, -0.01094223158545413, 0.54805381376508067,
0.38274910912583321], [0.28848151856172877, 0.53212135749834688, -0.51109059615373209, 0.56505257738107939,
0.41301877449713931]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.096777485598944848, -0.53360309973999165, 0.38101526908524591,
-0.21086513366492038, 0.41250062525520415], [-0.56462512387573127, -0.4537851529579448, -0.33361232456203066,
0.15129462749623307, 0.015545038417276023], [-0.27065683362567039, 0.092774321037059626, -0.24412719668449809,
0.25449123947954888, 0.09487553292096082], [0.029358083150294065, -0.35437076809338752, -0.47210337809722808,
-0.60173673244828274, 0.23543293637500923]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-2.5363493699555439, 1.9315826441262116, -4.797663921800063, -2.3131658171459835,
-1.4174075473244754], [-0.4937783451212221, -4.7652740781432534, 1.5781017135273068, -4.2362357361072114,
-3.918073606586808], [2.094919785395116, 1.3684348598821918, -4.2376402301126852, -1.6050592311847534,
3.151025223042982], [-2.6417620339476366, 0.27296872857386667, -1.4323869283247213, -4.6402797342361799,
-3.9199666124863741]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[-0.69802384928264605, 0.87453186333400357, 0.81014803662176149,
-4.214756633799734, 0.78685864942817574], [0.48682400140861759, -2.7486583171634758, 0.40554914153896249,
-1.7609786982015061, -0.39145780725801416], [1.2068445571926318, 0.18236245525374706, 4.017808328373075,
-1.0950851034750277, 0.12173582687690843], [0.22180579191198468, -4.2110674925319236, 2.9122016067639365,
4.5406571257464297, 3.0637655540581346]]))
arg1=Symbol(shape=(4, 5))
res=arg0*arg1
s1=numpy.array([[-2.7914057802277092, -3.2955192040483841, -0.4909883356152811, -4.414815259808397,
1.1535659837090115], [0.30062418712185313, 4.6879078677821262, 2.641934893458421, -4.6213986057014331,
2.2307025160830776], [4.0559589148649486, -0.33010334091372151, -1.5019795108463163, 1.5894091005782052,
4.3064711265533191], [2.9888346593766579, -4.5884630123207506, 2.4921626108815289, -3.5186629218511625,
-1.0861727773454932]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.0799802919708137, -6.3655676979244857, 2.3555970238060953, 10.212199748003247,
-1.6350731316459357], [-0.14844171362044126, -22.339165843065977, 4.1692419823943165, 19.577333924268451,
-8.7400566524118908], [8.4969085795002854, -0.45172491906991191, 6.364848799967322, -2.5510957490121045,
13.569799142075835], [-7.895789928888072, -1.2525069145814096, -3.5697411470863107, 16.327580247874213,
4.2577610225859299]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.9484678076243735, -2.8820365501694258, -0.39777323610290649,
18.607371903277738, 0.90769337196755784], [0.14635106969487355, -12.885456950875437, 1.071434428043893,
8.1381845005383653, -0.87322591559081664], [4.8949119406016965, -0.060198455736490949, -6.0346657877240473,
-1.7405382293708347, 0.52425182351249966], [0.66294083851702656, 19.32232743186902, 7.2576799597261958,
-15.977041869203234, -3.3277787409867781]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[0.20286433747766175, 2.066180723397264], [-3.1327754856901735, -1.1194293005675835]],
[[-1.7914925359739922, 1.275772548969373], [-2.3842819867614953, -3.1968139299234077]], [[-3.5171630188865488,
-1.4300055611015186], [4.2854751694367756, 1.7799374077309524]], [[-4.2108803597952917, -1.5964309596888695],
[2.7414856168787471, 1.1873651110226469]], [[-3.5913507733928229, -1.3017853623346696], [-0.13258097661378798,
-3.1022689591044426]], [[4.9076894073951749, 2.8964781538465161], [2.969217301725779,
1.8197050412291595]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[3.7913962593423918, 4.4765688935993317], [1.4500345756448763,
4.8443295010220275]], [[1.7391435441206662, 4.4187921026078829], [-4.2876409136689784, -4.1427096121048841]],
[[0.11488507950500271, 2.1339510129273167], [-2.8155795121378926, -4.6369329094888991]], [[0.67434242728218052,
4.9095299484356563], [0.94463745614236405, 3.557499141589803]], [[0.038621679734069048, -0.10332111066950311],
[-2.0403842705827979, 1.0573287011436552]], [[-2.1400599935190945, 4.2642563454671869], [3.9163707220927186,
-2.5416950274474726]]]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(3.22032001333)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[0.65328808596966281, 6.6537631347069341], [-10.088539593828681, -3.6049205801225788]],
[[-5.7691792673232181, 4.108395871899436], [-7.6781509993834112, -10.294763877415299]], [[-11.326390459854416,
-4.6050755275843107], [13.800601454753718, 5.7319680565855959]], [[-13.560382296374918, -5.1410185693811012],
[8.8284609982830986, 3.8236956301526179]], [[-11.565298770434771, -4.1921654553826446], [-0.42695317237583746,
-9.9902988157275558]], [[15.804330417828238, 9.3275865669967502], [9.5618299006647014,
5.8600325626225302]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[12.209509252413918, 14.41598439909561], [4.6695753639655058,
15.60029124329226]], [[5.6005987611804535, 14.229924642760029], [-13.807575844248428, -13.340850673264026]],
[[0.36996672076263859, 6.8720051543895364], [-9.0670670520514882, -14.932407848882294]], [[2.1715984144123999,
15.810257548976251], [3.0420349053536828, 11.456285683055723]], [[0.12437416819593333, -0.33272704048818802],
[-6.5706903014356595, 3.4049367769580914]], [[-6.8916780268502658, 13.732270051265186], [12.6119670159636,
-8.1850713646631412]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-3.0362430129414495, -1.6103362752021533], [3.0322276873410949, 2.889681860828258]],
[[3.0356323227798061, 3.8849951306068178], [3.1682099248345352, 1.252560279633526]], [[-2.422448371330125,
-4.3051891736441767], [3.2099062879412248, 3.0454833071508354]], [[-1.1376898513557334, 0.97676409380038631],
[1.0009530341765513, -3.085670706338802]], [[3.7338110619145226, -3.4624334476005911], [-1.9009045069833541,
0.020021974502883566]], [[2.2281987737323306, -2.9210437430011229], [-1.3860392623437132,
0.463839486811219]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[4.1305451146381422, 2.3814888644055499], [-0.89220558616836776,
3.8418880701208664]], [[3.3492033714884197, 2.1415021426686387], [1.4787086763681101, 0.38743271004052637]],
[[-4.99502836998282, 4.4948912080047858], [-3.7653670133834769, -4.0800035996907447]], [[-3.1718016142315641,
2.320405525041048], [-2.8237839197556944, 3.5858545025811086]], [[3.0016852702625556, -2.8784349824436584],
[-3.7323763876968008, 0.63313826152617381]], [[1.7585155020491481, -3.655987828892715], [0.54081193002197825,
4.9685421412273278]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0*arg1
s1=numpy.array([[[-0.93345150384204523, -3.5420417816351479], [-3.5444663998891226, 2.3971219306415996]],
[[0.8384895676298747, -4.3543886014540583], [1.9437605664303446, -3.0820690979415186]], [[4.9690708795309,
-2.1112283314766356], [2.37080840790198, 4.7216280449696395]], [[-3.3735779321675219, 3.0438054138693342],
[-0.47550686063032277, 2.7155331110677619]], [[-1.3378665576363566, -2.6741065199531286], [-0.22177834028631249,
0.61204525154245371]], [[4.0531432724462295, -4.0695297515588145], [3.928681336032259, -4.8729434946660577]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[2.8341856064600983, 5.7038783692487431], [-10.74762915459401, 6.9269197611686435]],
[[2.5453460338109117, -16.916778513418848], [6.1582415180666157, -3.8604773311674778]], [[-12.03731765914358,
9.0892373557640713], [7.6100728160284898, 14.379639393530271]], [[3.8380853761846505, 2.97307983678279],
[-0.47596003491968808, -8.3792409729148645]], [[-4.9953409522681316, 9.2589158571325303], [0.4215794466015394,
0.012254354420993967]], [[9.0312088694261341, 11.88727441774779], [-5.4453065809776664,
-2.2602636098259725]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-3.8556635489463869, -8.4353330602232983], [3.162392721967159,
9.2094741479570601]], [[2.8082720868638438, -9.3249325200257633], [2.8742556143627427, -1.1940943831476432]],
[[-24.820650015712328, -9.4897416652449422], [-8.9269637741663139, -19.264259419876904]], [[10.700319930984929,
7.0628628994922567], [1.3427286267814176, 9.737506633230419]], [[-4.0158543396339219, 7.6972417538137563],
[0.82776024058721887, 0.3875092665369389]], [[7.1275152766229084, 14.87815124101582], [2.1246777357809301,
-24.211425105067871]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_mult_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[0.083529595185620309, 4.372823949648037, 4.0009796310147365, 0.68196650279571092],
[-3.6991633277760947, -2.7548332509536966, -1.0695165519831527, -4.9760591951223629], [2.4321680119150511,
3.8212450023110254, 0.8004705884830452, -2.0444757685121964]], [[-3.4279885095627494, 3.5526615517407674,
-0.37278949561560104, 1.8208812052515633], [-1.3542092169449638, 4.6164533745863388, 0.70466827998486359,
4.8882907830017537], [-1.0014606132552197, 0.027094091914280583, -0.50731223435619732, 1.3858925949581025]]],
[[[-0.92819420758339621, -0.63572501286400218, 1.5822275384230347, -4.1213856389411898], [0.019204126745418826,
1.2601369851282858, 3.1282675256554278, -1.2005085063042245], [0.31394545189514567, 4.3373088139034053,
-4.2967881792088045, -3.2133421015779429]], [[-4.6394850485838566, 2.7899856599199682, 3.4214279590576524,
0.75816457866836107], [2.6716538166314328, -0.78329465377730312, 0.9411007620209233, -4.3621528303217216],
[-0.21060811641931387, -1.0884058600082236, 3.3643361086045402, -0.59658223764974405]]], [[[-2.7722966748649869,
1.3359537198967564, 3.3994221388292836, 0.89269410005117322], [-2.5434807639867083, -2.2312407449400631,
2.1964509494368221, -3.8483462591031992], [-4.617770174657271, -4.0164566401957895, -2.0915606068178807,
1.3098480489351907]], [[-4.000475916402392, 3.4797401237531425, 4.727298203954307, -1.3658950385993265],
[4.3822054513768176, 4.7641649434095044, 2.2480529159500593, -3.370947660818576], [-0.12763750951483388,
-0.56331578609421484, 1.1108900947641267, 2.3086655633422826]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-0.99642554217400114, -3.1989491729058739, -0.40653827121759534,
-3.7494299986207311], [-4.962891998625155, 0.65199311462416087, -1.5646394746401904, -0.58561931277306201],
[-1.9266349397638294, -2.1350741612611923, -1.3141253220586746, 3.0489459946325113]], [[-2.4730670458068271,
-1.0264935833023214, -2.4514436230760808, -2.4564197681665032], [-0.48365426443540827, -0.92587219714034585,
-4.1751007096042496, -4.5526966630634966], [-2.3782862353872791, -2.5275939067663735, -2.6938709700385766,
-4.8403251647207153]]], [[[3.2867188889910004, -1.4242104085047957, 2.0936224809849646, -1.4601757832869966],
[-0.21399139868108641, -0.98887005738367506, -2.261387321435977, -3.6513388135428149], [2.9334655428664806,
-3.9524701563078288, 3.4584296338361185, 4.5958550113660674]], [[0.37781815561656451, -3.0593937474827717,
-2.0739947527751279, -2.4165789597896401], [4.5330104520530448, 4.9794431912053145, 1.7661478112868867,
3.078941742057026], [4.9858586211966696, -3.1080213069928195, -4.2716128061474183, -1.5876111863601041]]],
[[[0.90451414172461853, 0.1162545327223361, 3.2911315914907693, -1.4337863404739979], [2.0405912462551932,
4.8936580709384394, -1.1291930809589745, 2.111861338433255], [-2.0913683111797732, -1.55247331778124,
4.9769696268492716, -0.24856367420835213]], [[-2.1381113867577026, 1.6110287228762354, 0.19273167692598125,
-1.1320874579780638], [1.2876584378472149, 0.79519349199575018, -3.7974642196600819, -4.2341641389677163],
[-4.3323767453858073, -0.80301234928598664, 4.344905698376083, -0.27642913101571054]]]]))
arg1=Symbol(shape=())
res=arg0*arg1
s1=numpy.array(-1.40149736096)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.11706650721432915, -6.1285012253547855, -5.6073623941039141, -0.955774253928249],
[5.1843676416216082, 3.8608915310841012, 1.4989246251026278, 6.9739338299224034], [-3.4086770500993597,
-5.3554647863033438, -1.1218574172814939, 2.8653273941073367]], [[4.8043168495380142, -4.9790457891328019,
0.52246349429720151, -2.5519602037735591], [1.897920643729998, -6.4699472214569242, -0.98759073474784653,
-6.8509266319601139], [1.4035444065780798, -0.037972298315350331, 0.71099675763065506, -1.9423248144015677]]],
[[[1.3008617323823231, 0.89096692782230835, -2.2174877195310199, 5.7761110964560265], [-0.02691453295315966,
-1.7660786590997284, -4.3842586815689133, 1.6825095033900148], [-0.43999372231503248, -6.0787268563347068,
6.0219372937459816, 4.5034904752087277]], [[6.5022260517828334, -3.9101575394815598, -4.7951222553187058,
-1.0625656561736561], [-3.7443157733956758, | |
extraction
for list_name in extraction:
markup[page_id][list_name]['extract'] = ' '
if list_name not in names:
names[list_name] = []
if 'sequence' in extraction[list_name]:
for sequence_item in extraction[list_name]['sequence']:
if 'sub_rules' in sequence_item:
for item_name in sequence_item['sub_rules']:
if item_name not in names[list_name]:
names[list_name].append(item_name)
return markup, names
def rulesToMarkup(self, rule_set, remove_html = False):
markup = {}
counts = {}
for name in rule_set.names():
counts[name] = 0
for page_id in self._pages:
markup[page_id] = {}
names = []
for page_id in self._pages:
page_string = self.getPage(page_id).getString()
extraction = rule_set.extract(page_string)
for name in rule_set.names():
if name in extraction:
if extraction[name]:
extract = extraction[name]['extract']
if remove_html:
processor = RemoveHtml(extract)
extract = processor.post_process()
extract = extract.strip()
if extract:
markup[page_id][name] = {}
markup[page_id][name]['extract'] = extract
counts[name] = counts[name] + 1
if name not in names:
names.append(name)
return markup, names
def learnAllRules(self, in_list = False):
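# Overview: walk consecutive pairs of stripes, skipping stop-word stripes.
# A candidate slot is only kept when, on more than one page, there are visible
# tokens between the previous stripe and the current one.  Begin/end rules are
# then built from the surrounding stripe fragments, and a rule is added to the
# rule set only if the values it extracts differ from every rule learned so far.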
if not self._stripes:
return RuleSet()
rule_set = RuleSet()
previous_stripe = None
count = 0
values = []
for stripe in self._stripes:
stripe_text = stripe['stripe']
if stripe_text not in cachedStopWords:
if previous_stripe is not None:
num_with_values = 0
rule_stripes = BoundingStripes()
rule_stripes.bounding_stripes.append(previous_stripe)
rule_stripes.bounding_stripes.append(stripe)
for page_id in self._pages:
rule_stripes.page_ids.append(page_id)
if(previous_stripe['page_locations'][page_id] + previous_stripe['tuple_size'] != stripe['page_locations'][page_id]):
##TODO: figure out if the stuff in the middle is visible!!
token_locations = range(previous_stripe['page_locations'][page_id]+previous_stripe['tuple_size'], stripe['page_locations'][page_id])
for token_index in token_locations:
if self.getPage(page_id).get_token(token_index).visible:
num_with_values += 1
break
if num_with_values > 1:
begin_stripes = self.getNextLevelStripes(rule_stripes,'begin', False)
end_stripes = self.getNextLevelStripes(rule_stripes,'end', False)
start_rule = self.buildRule(begin_stripes)
end_rule = self.buildRule(end_stripes)
strip_end_regex = ''
if len(end_stripes) > 0:
strip_end_regex_stripes = []
strip_end_regex_stripes.append(end_stripes[-1])
strip_end_regex = self.buildRule(strip_end_regex_stripes)
rule_name = ''
visible_chunk_before = ''
visible_chunk_after = ''
if not in_list:
#get visible chunk(s) before
(visible_chunk_before, visible_chunk_after) = self.__get_visible_chunk_buffers(begin_stripes, end_stripes)
rule_name = ''.join(visible_chunk_before.split())
rule_name = rule_name+format(count, '04')
rule = ItemRule(rule_name, start_rule, end_rule, True, strip_end_regex)
if len(visible_chunk_before) > 0:
rule.set_visible_chunk_before(visible_chunk_before)
if len(visible_chunk_after) > 0:
rule.set_visible_chunk_after(visible_chunk_after)
# rule = ItemRule('slot'+format(count, '04'), start_rule, end_rule, True, strip_end_regex)
new_values = ''
for page_id in self._pages:
extraction_list = rule.apply(self.getPage(page_id).getString())
new_values += json.dumps(flattenResult(extraction_list), sort_keys=True, indent=2, separators=(',', ': '))
if new_values not in values:
rule_set.add_rule(rule)
values.append(new_values)
previous_stripe = stripe
count += 1
return rule_set
def learnRulesFromMarkup(self, page_markups):
#First create a key based markup dictionary instead of page based
page_ids = list()
keys = list()
for page_markup in page_markups:
page_ids.append(page_markup)
keys.extend(page_markups[page_markup])
keys = list(set(keys))
key_based_markup = {}
for key in keys:
if key not in key_based_markup:
key_based_markup[key] = []
for page_id in page_ids:
if key in page_markups[page_id]:
key_based_markup[key].append({page_id:page_markups[page_id][key]})
rule_set = RuleSet()
#print key_based_markup
for key in key_based_markup:
#Because we have the EXACT stripes on each side, we should be able to learn these rules
#without testing. We will only learn the "top level" rules for now and create a RuleSet
#from them.
#print "key:", key
pages = key_based_markup[key]
(rule, isSequence, hasSubRules) = self.__learn_item_rule(key, pages)
if not rule and not isSequence:
continue
elif isSequence:
rule = self.__learn_sequence_rule(key, pages, rule)
if hasSubRules:
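# Nested fields: apply the freshly learned rule to every page, treat each
# non-empty extract as a synthetic "sub page", copy the nested markup for this
# key onto it, and recursively learn a rule set for the inner items.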
sub_rules_markup = {}
sub_rules_page_manager = PageManager(self._WRITE_DEBUG_FILES, self.largest_tuple_size)
for page in pages:
page_id = page.keys()[0]
real_page = self.getPage(page_id)
sub_page_extract = rule.apply(real_page.getString())
if sub_page_extract['extract']:
sub_page_id = page_id + "_sub"
if sub_page_id not in sub_rules_markup:
sub_rules_markup[sub_page_id] = {}
for item in page[page_id]:
if item not in ['begin_index', 'end_index', 'starting_token_location', 'ending_token_location', 'extract', 'sequence', 'sequence_number']:
sub_rules_markup[sub_page_id][item] = page_markups[page_id][key][item]
sub_rules_page_manager.addPage(sub_page_id, sub_page_extract['extract'])
sub_rules_page_manager.learnStripes(sub_rules_markup)
sub_rules = sub_rules_page_manager.learnRulesFromMarkup(sub_rules_markup)
rule.set_sub_rules(sub_rules)
if rule:
rule_set.add_rule(rule)
return rule_set
#get all visible text before and after until hit a visible slot
#
#until hit visible slot
def __get_visible_chunk_buffers(self, begin_stripes, end_stripes):
visible_chunk_before = ''
visible_chunk_after = ''
page = self.getPage(self.seed_page_id)
#get visible chunk(s) before
if begin_stripes:
real_stripe = self.getStripeFragmentsForSlot(begin_stripes[-1])
start_location = real_stripe[-1]['page_locations'][self.seed_page_id] + real_stripe[-1]['tuple_size'] - 1
end_location = real_stripe[0]['page_locations'][self.seed_page_id]
visible_token_count = 0
for i in range(start_location, end_location, -1):
token = page.tokens[i]
if token.visible:
visible_chunk_before = token.getTokenWithWhitespace() + visible_chunk_before
if token.token not in cachedStopWords and token.token not in string.punctuation:
visible_token_count = visible_token_count + 1
elif visible_token_count > 0:
break
#and after
if end_stripes:
real_stripe = self.getStripeFragmentsForSlot(end_stripes[0])
start_location = real_stripe[0]['page_locations'][self.seed_page_id]
end_location = real_stripe[-1]['page_locations'][self.seed_page_id] + real_stripe[-1]['tuple_size']
visible_token_count = 0
for i in range(start_location, end_location):
token = page.tokens[i]
if token.visible:
visible_chunk_after += token.getTokenWithWhitespace()
visible_token_count = visible_token_count + 1
elif visible_token_count > 0:
break
return (visible_chunk_before.strip(), visible_chunk_after.strip())
def __learn_item_rule(self, key, pages):
isSequence = False
hasSubRules = False
for page in pages:
if 'sequence' in page[page.keys()[0]]:
isSequence = True
for item in page[page.keys()[0]]:
if item not in ['begin_index', 'end_index', 'starting_token_location', 'ending_token_location', 'extract', 'sequence', 'sequence_number']:
hasSubRules = True
logger.info('Finding stripes for %s', key)
exact_bounding_stripes = self.getExactBoundingStripesForKey(pages, isSequence)
rule = None
if exact_bounding_stripes is not None:
#print "exact bounding stripes:", exact_bounding_stripes
begin_stripes = self.getNextLevelStripes(exact_bounding_stripes,'begin')
#TODO: Figure out last mile if we can
start_points = {}
begin_goto_points = {}
end_goto_points = {}
#find the locations AGAIN for now... TODO: Fix this!
for page in pages:
page_id = page.keys()[0]
if 'extract' in page[page_id]:
extract = page[page_id]['extract']
#print "extract:", extract
#TODO: ADD starting_token_location and end_token_location
shortest_pairs = self.getPossibleLocations(page_id, extract, False)
begin_stripe = exact_bounding_stripes.bounding_stripes[0]
end_stripe = exact_bounding_stripes.bounding_stripes[1]
for pair in shortest_pairs:
if begin_stripe['page_locations'][page_id]+begin_stripe['tuple_size'] <= pair[0] and \
end_stripe['page_locations'][page_id]+end_stripe['tuple_size'] >= pair[1]:
start_points[page_id] = begin_stripe['page_locations'][page_id]+begin_stripe['tuple_size'] + 1
if begin_stripe['page_locations'][page_id]+begin_stripe['tuple_size'] != pair[0]:
begin_goto_points[page_id] = pair[0] - 1
if end_stripe['page_locations'][page_id]-1 != pair[1]:
end_goto_points[page_id] = pair[1] + 1
break
if begin_goto_points:
last_mile = self.__find_last_mile(start_points, begin_goto_points, 'begin')
if last_mile:
logger.info("Begin last mile: %s", last_mile['stripe'])
begin_stripes.append(last_mile)
else:
logger.info("Could not learn begin last mile!!!")
#print "begin stripes:", begin_stripes
start_rule = self.buildRule(begin_stripes)
#print "startrule:", start_rule
end_stripes = self.getNextLevelStripes(exact_bounding_stripes, 'end')
if end_goto_points:
last_mile = self.__find_last_mile(start_points, end_goto_points, 'end')
if last_mile:
logger.info("End last mile: %s", last_mile['stripe'])
end_stripes = []
end_stripes.append(last_mile)
else:
logger.info("Could not learn end last mile!!!")
#print "end stripes:", end_stripes
end_rule = self.buildRule(end_stripes)
#print "endrule:", end_rule
strip_end_regex = ''
if len(end_stripes) > 0:
strip_end_regex_stripes = []
strip_end_regex_stripes.append(end_stripes[-1])
strip_end_regex = self.buildRule(strip_end_regex_stripes)
#TODO: HACK for ISI to not get HTML for extractions
rule = ItemRule(key, start_rule, end_rule, True, strip_end_regex, None, not isSequence)
# rule = ItemRule(key, start_rule, end_rule, True, strip_end_regex)
(visible_chunk_before, visible_chunk_after) = self.__get_visible_chunk_buffers(begin_stripes, end_stripes)
if visible_chunk_before:
rule.set_visible_chunk_before(visible_chunk_before)
if visible_chunk_after:
rule.set_visible_chunk_after(visible_chunk_after)
return (rule, isSequence, hasSubRules)
def __learn_sequence_rule(self, key, pages, item_rule):
if item_rule is None:
#This is the case where we are not given the start and end of the list so we need to learn it based on number 1 and last
# Unless the markup contains starting_token_location and ending_token_location
for page_markup in pages:
extract = u''
starting_token_location = -1
ending_token_location = -1
page_id = page_markup.keys()[0]
if 'sequence' in page_markup[page_id]:
highest_sequence_number = 0
for item in page_markup[page_id]['sequence']:
sequence_number = item['sequence_number']
if sequence_number == 1:
extract = extract + item['extract']
if 'starting_token_location' in item:
starting_token_location = item['starting_token_location']
elif sequence_number > highest_sequence_number:
highest_sequence_number = sequence_number
end_extract = item['extract']
if 'ending_token_location' in item:
ending_token_location = item['ending_token_location']
if starting_token_location > 0 and ending_token_location > 0:
page_markup[page_id]['starting_token_location'] = starting_token_location
page_markup[page_id]['ending_token_location'] = ending_token_location
#update stripes to remove these
list_range = range(starting_token_location, ending_token_location)
stripes_to_remove = []
for stripe in self._stripes:
if stripe['page_locations'][page_id] in list_range:
stripes_to_remove.append(stripe)
self._stripes = [x for x in self._stripes if x not in stripes_to_remove]
if extract and end_extract:
page_markup[page_id]['extract'] = extract + LONG_EXTRACTION_SEP + end_extract
(item_rule, isSequence, hasSubRules) = self.__learn_item_rule(key, pages)
if item_rule is None:
return None
#adding the stuff for the beginning and end of the list.
#now set up the sub page manager and re run to learn the iteration rule
begin_sequence_page_manager = PageManager(self._WRITE_DEBUG_FILES, self.largest_tuple_size)
begin_sequence_starts = {}
begin_sequence_goto_points = {}
end_sequence_page_manager = PageManager(self._WRITE_DEBUG_FILES, self.largest_tuple_size)
end_sequence_markup = {}
end_sequence_starts = {}
end_sequence_goto_points = {}
num_with_nothing_at_begin = 0
num_with_sequence = 0
num_with_nothing_at_end = 0
#This is for any sub_rules in the sequence
sub_rules_markup = {}
sub_rules_page_manager = PageManager(self._WRITE_DEBUG_FILES, self.largest_tuple_size)
for page_markup in pages:
page_id = page_markup.keys()[0]
if 'sequence' in page_markup[page_id]:
logger.info("Getting iteration rule info for ... %s", page_id)
num_with_sequence = num_with_sequence + 1
page = self.getPage(page_id)
page_string = page.getString()
full_sequence = item_rule.apply(page_string)
location_finder_page_manager = PageManager(self._WRITE_DEBUG_FILES, self.largest_tuple_size)
| |
# encoding: UTF-8
# Stock screener engine
import os
import sys
import logging
import importlib
import traceback
import pandas as pd
import numpy as np
from collections import OrderedDict
from typing import List, Any, Callable
from datetime import datetime, timedelta
from functools import lru_cache
from concurrent.futures import ThreadPoolExecutor
# Huafu Asset (华富资产)
from vnpy.event import EventEngine, Event
from vnpy.trader.constant import Exchange, Interval # noqa
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.event import EVENT_TIMER # noqa
from vnpy.trader.util_wechat import send_wx_msg
from vnpy.trader.util_logger import setup_logger
from vnpy.trader.constant import Exchange, StockType
from vnpy.trader.object import LogData, BarData
from vnpy.data.tdx.tdx_common import get_tdx_market_code
from vnpy.data.common import stock_to_adj
from vnpy.trader.utility import load_json, save_json, get_csv_last_dt, extract_vt_symbol, get_folder_path, TRADER_DIR, \
append_data
from vnpy.data.stock.stock_base import get_stock_base
from vnpy.data.stock.adjust_factor import get_all_adjust_factor
from vnpy.app.stock_screener.template import ScreenerTemplate
APP_NAME = 'StockScreenerEngine'
# screener log event
EVENT_SCR = 'eScrLog'
class StockScreenerEngine(BaseEngine):
"""
Stock screener engine
"""
# strategy settings file
setting_filename = "screener_setting.json"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__(main_engine, event_engine, APP_NAME)
self.main_engine = main_engine
self.event_engine = event_engine
self.strategies = {} # all running screener strategy instances
self.classes = {} # classes of the screener strategies
self.class_module_map = {} # mapping from strategy class to its module
# whether write_log also pushes to the event bus (relatively resource-intensive)
self.event_log = False
self.strategy_loggers = {} # strategy_name: logger
self.thread_executor = ThreadPoolExecutor(max_workers=1)
self.thread_tasks = []
self.create_logger(logger_name=APP_NAME)
# load the full stock universe
self.write_log(f'获取全量股票信息')
self.symbol_dict = get_stock_base()
self.write_log(f'共{len(self.symbol_dict)}个股票')
# ex-rights adjustment factors
self.write_log(f'获取所有除权因子')
self.adjust_factor_dict = get_all_adjust_factor()
self.write_log(f'共{len(self.adjust_factor_dict)}条除权信息')
# locate the directory holding the bar data files
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
self.write_log(f'项目所在目录:{vnpy_root}')
self.bar_data_folder = os.path.abspath(os.path.join(vnpy_root, 'bar_data'))
if os.path.exists(self.bar_data_folder):
SSE_folder = os.path.abspath(os.path.join(vnpy_root, 'bar_data', 'SSE'))
if os.path.exists(SSE_folder):
self.write_log(f'上交所bar数据目录:{SSE_folder}')
else:
self.write_error(f'不存在上交所数据目录:{SSE_folder}')
SZSE_folder = os.path.abspath(os.path.join(vnpy_root, 'bar_data', 'SZSE'))
if os.path.exists(SZSE_folder):
self.write_log(f'深交所bar数据目录:{SZSE_folder}')
else:
self.write_error(f'不存在深交所数据目录:{SZSE_folder}')
else:
self.write_error(f'不存在bar数据目录:{self.bar_data_folder}')
self.bar_data_folder = None
def get_all_vt_symbols(self, exchange: Exchange = None, stock_types: List[StockType] = [StockType.STOCK]):
"""
Get the full list of stock symbols
:param exchange: exchange filter
:param stock_types: contract types: stock_cn, index_cn, etf_cn, bond_cn, cb_cn
:return:
"""
vt_symbols = []
if len(stock_types) > 0:
stock_type_values = [s.value for s in stock_types]
else:
stock_type_values = []
for symbol_marketid, info in self.symbol_dict.items():
if exchange:
if info.get('exchange', None) != exchange.value:
continue
if len(stock_type_values) > 0:
if info.get('type', None) not in stock_type_values:
continue
vt_symbols.append('{}.{}'.format(info.get('code'), info.get('exchange')))
return vt_symbols
def write_log(self, msg: str, strategy_name: str = '', level: int = logging.INFO):
"""
Create screener engine log event.
"""
if self.event_log:
# push to the global screener log event (EVENT_SCR)
log = LogData(msg=f"{strategy_name}: {msg}" if strategy_name else msg,
gateway_name="Screener",
level=level)
event = Event(type=EVENT_SCR, data=log)
self.event_engine.put(event)
# keep a separate log file per strategy
if strategy_name:
strategy_logger = self.strategy_loggers.get(strategy_name, None)
if not strategy_logger:
log_path = get_folder_path('log')
log_filename = str(log_path.joinpath(str(strategy_name)))
print(u'create logger:{}'.format(log_filename))
self.strategy_loggers[strategy_name] = setup_logger(file_name=log_filename,
name=str(strategy_name))
strategy_logger = self.strategy_loggers.get(strategy_name)
if strategy_logger:
strategy_logger.log(level, msg)
else:
if self.logger:
self.logger.log(level, msg)
# for critical, error and warning levels, also print to sys.stderr
if level in [logging.CRITICAL, logging.ERROR, logging.WARNING]:
print(f"{strategy_name}: {msg}" if strategy_name else msg, file=sys.stderr)
def write_error(self, msg: str, strategy_name: str = '', level: int = logging.ERROR):
"""写入错误日志"""
self.write_log(msg=msg, strategy_name=strategy_name, level=level)
@lru_cache()
def get_data_path(self):
data_path = os.path.abspath(os.path.join(TRADER_DIR, 'data'))
return data_path
@lru_cache()
def get_logs_path(self):
log_path = os.path.abspath(os.path.join(TRADER_DIR, 'log'))
return log_path
@lru_cache()
def get_price_tick(self, vt_symbol: str):
"""查询价格最小跳动"""
contract = self.main_engine.get_contract(vt_symbol)
if contract is None:
self.write_error(f'查询不到{vt_symbol}合约信息,缺省使用0.01作为价格跳动')
return 0.01
return contract.pricetick
@lru_cache()
def get_name(self, vt_symbol: str):
"""查询合约的name"""
contract = self.main_engine.get_contract(vt_symbol)
if contract is None:
symbol_info = self.symbol_dict.get(vt_symbol, None)
if symbol_info:
name = symbol_info.get('name', None)
if name:
return name
self.write_error(f'查询不到{vt_symbol}合约信息')
return vt_symbol
return contract.name
def get_bars(
self,
vt_symbol: str,
days: int,
interval: Interval,
interval_num: int = 1
):
"""获取历史记录"""
symbol, exchange = extract_vt_symbol(vt_symbol)
end = datetime.now()
start = end - timedelta(days)
bars = []
# validate the stock code
if vt_symbol not in self.symbol_dict:
self.write_error(f'{vt_symbol}不在基础配置股票信息中')
return bars
# check the bar data directory
if not self.bar_data_folder:
self.write_error(f'没有bar数据目录')
return bars
# bar files are stored in per-exchange folders
bar_file_folder = os.path.abspath(os.path.join(self.bar_data_folder, f'{exchange.value}'))
resample_min = False
resample_hour = False
resample_day = False
file_interval_num = 1
# only 1/5/15/30-minute and daily files exist on disk
if interval == Interval.MINUTE:
# if a matching minute file exists, read it directly
bar_file_path = os.path.abspath(os.path.join(bar_file_folder, f'{symbol}_{interval_num}m.csv'))
if interval_num in [1, 5, 15, 30] and os.path.exists(bar_file_path):
file_interval_num = interval_num
# need to resample
else:
resample_min = True
if interval_num > 5:
file_interval_num = 5
elif interval == Interval.HOUR:
file_interval_num = 5
resample_hour = True
bar_file_path = os.path.abspath(os.path.join(bar_file_folder, f'{symbol}_{file_interval_num}m.csv'))
elif interval == Interval.DAILY:
bar_file_path = os.path.abspath(os.path.join(bar_file_folder, f'{symbol}_{interval_num}d.csv'))
if not os.path.exists(bar_file_path):
file_interval_num = 5
resample_day = True
bar_file_path = os.path.abspath(os.path.join(bar_file_folder, f'{symbol}_{file_interval_num}m.csv'))
else:
self.write_error(f'目前仅支持分钟,小时,日线数据')
return bars
bar_interval_seconds = interval_num * 60
if not os.path.exists(bar_file_path):
self.write_error(f'没有bar数据文件:{bar_file_path}')
return bars
try:
data_types = {
"datetime": str,
"open": float,
"high": float,
"low": float,
"close": float,
"volume": float,
"amount": float,
"symbol": str,
"trading_day": str,
"date": str,
"time": str
}
symbol_df = None
qfq_bar_file_path = bar_file_path.replace('.csv', '_qfq.csv')
use_qfq_file = False
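# prefer the pre-computed forward-adjusted (_qfq) csv, but only when its last
# bar timestamp matches the raw file, i.e. the adjusted file is not stale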
last_qfq_dt = get_csv_last_dt(qfq_bar_file_path)
if last_qfq_dt is not None:
last_dt = get_csv_last_dt(bar_file_path)
if last_qfq_dt == last_dt:
use_qfq_file = True
if use_qfq_file:
self.write_log(f'使用前复权文件:{qfq_bar_file_path}')
symbol_df = pd.read_csv(qfq_bar_file_path, dtype=data_types)
else:
# load the csv file => dataframe
self.write_log(f'使用未复权文件:{bar_file_path}')
symbol_df = pd.read_csv(bar_file_path, dtype=data_types)
# convert the time column, str => datetime
symbol_df["datetime"] = pd.to_datetime(symbol_df["datetime"], format="%Y-%m-%d %H:%M:%S")
# set datetime as the index
symbol_df = symbol_df.set_index("datetime")
# slice to the requested date range
symbol_df = symbol_df.loc[start:end]
if resample_day:
self.write_log(f'{vt_symbol} resample:{file_interval_num}m => {interval}day')
symbol_df = self.resample_bars(df=symbol_df, to_day=True)
elif resample_hour:
self.write_log(f'{vt_symbol} resample:{file_interval_num}m => {interval}hour')
symbol_df = self.resample_bars(df=symbol_df, x_hour=interval_num)
elif resample_min:
self.write_log(f'{vt_symbol} resample:{file_interval_num}m => {interval}m')
symbol_df = self.resample_bars(df=symbol_df, x_min=interval_num)
if len(symbol_df) == 0:
return bars
if not use_qfq_file:
# apply the price adjustment
adj_list = self.adjust_factor_dict.get(vt_symbol, [])
# keep only adjustment records dated on or before the end date
adj_list = [row for row in adj_list if
row['dividOperateDate'].replace('-', '') <= end.strftime('%Y%m%d')]
if len(adj_list) > 0:
self.write_log(f'需要对{vt_symbol}进行前复权处理')
for row in adj_list:
row.update({'dividOperateDate': row.get('dividOperateDate')[:10] + ' 09:30:00'})
# list -> dataframe, convert the adjustment date format
adj_data = pd.DataFrame(adj_list)
adj_data["dividOperateDate"] = pd.to_datetime(adj_data["dividOperateDate"],
format="%Y-%m-%d %H:%M:%S")
adj_data = adj_data.set_index("dividOperateDate")
# adjust open/high/low/close/volume; 'fore' means forward adjustment, anything else means backward adjustment
symbol_df = stock_to_adj(symbol_df, adj_data, adj_type='fore')
for dt, bar_data in symbol_df.iterrows():
bar_datetime = dt # - timedelta(seconds=bar_interval_seconds)
bar = BarData(
gateway_name='backtesting',
symbol=symbol,
exchange=exchange,
datetime=bar_datetime
)
if 'open' in bar_data:
bar.open_price = float(bar_data['open'])
bar.close_price = float(bar_data['close'])
bar.high_price = float(bar_data['high'])
bar.low_price = float(bar_data['low'])
else:
bar.open_price = float(bar_data['open_price'])
bar.close_price = float(bar_data['close_price'])
bar.high_price = float(bar_data['high_price'])
bar.low_price = float(bar_data['low_price'])
bar.volume = int(bar_data['volume']) if not np.isnan(bar_data['volume']) else 0
bar.date = dt.strftime('%Y-%m-%d')
bar.time = dt.strftime('%H:%M:%S')
str_td = str(bar_data.get('trading_day', ''))
if len(str_td) == 8:
bar.trading_day = str_td[0:4] + '-' + str_td[4:6] + '-' + str_td[6:8]
else:
bar.trading_day = bar.date
bars.append(bar)
except Exception as ex:
self.write_error(u'回测时读取{} csv文件{}失败:{}'.format(vt_symbol, bar_file_path, ex))
self.write_error(traceback.format_exc())
return bars
return bars
def resample_bars(self, df, x_min=None, x_hour=None, to_day=False):
"""
Rebuild x-minute bars (or daily bars)
:param df: input minute-bar dataframe
:param x_min: 5, 15, 30, 60
:param x_hour: 1, 2, 3, 4
:param to_day: rebuild daily bars; when True, minute bars are not rebuilt
:return:
"""
# aggregation rule for each column of the dataframe
ohlc_rule = {
'open': 'first', # open column: first value in the window
'high': 'max', # high column: maximum value in the window
'low': 'min', # low column: minimum value in the window
'close': 'last', # close column: last value in the window
'volume': 'sum', # volume column: sum of all volume values in the window
'amount': 'sum', # amount column: sum of all amount values in the window
"symbol": 'first',
"trading_date": 'first',
"date": 'first',
"time": 'first'
}
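# e.g. five 1-minute bars covering 09:30-09:34 collapse into one 5-minute bar:
# open = the 09:30 open, high = max of the five highs, low = min of the lows,
# close = the 09:34 close, volume/amount = their sums, and the remaining
# columns keep their first value in the window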
if isinstance(x_min, int) and not to_day:
# build x-minute bars and drop empty rows; closed='left' means the 09:30 bar covers data from 09:30 up to 09:35
df_target = df.resample(f'{x_min}min', closed='left', label='left').agg(ohlc_rule).dropna(axis=0,
how='any')
return df_target
if isinstance(x_hour, int) and not to_day:
# build x-hour bars and drop empty rows; closed='left' works the same way as for minute bars
df_target = df.resample(f'{x_hour}hour', closed='left', label='left').agg(ohlc_rule).dropna(axis=0,
how='any')
return df_target
if to_day:
# build daily bars and drop empty rows; closed='left' works the same way as for minute bars
df_target = df.resample(f'D', closed='left', label='left').agg(ohlc_rule).dropna(axis=0, how='any')
return df_target
return df
def get_adjust_factor(self, vt_symbol, check_date=None):
"""
Get the ex-rights adjustment factor [dated before check_date]
:param vt_symbol:
:param check_date: a specified date
:return:
"""
stock_adjust_factor_list = self.adjust_factor_dict.get(vt_symbol, [])
if len(stock_adjust_factor_list) == 0:
return None
stock_adjust_factor_list.reverse()
if check_date is None:
check_date = datetime.now().strftime('%Y-%m-%d')
for d in stock_adjust_factor_list:
if d.get("dividOperateDate","") < check_date:
return d
return None
def register_event(self):
"""
Register events
:return:
"""
pass
def register_funcs(self):
"""
register the funcs to main_engine
:return:
"""
self.main_engine.get_all_completed_status = self.get_all_completed_status
def init_engine(self):
"""
"""
self.register_event()
self.register_funcs()
self.load_strategy_class()
self.load_strategy_setting()
self.write_log("CTA策略引擎初始化成功")
def load_strategy_class(self):
"""
Load strategy class from source code.
"""
# load all strategies under vnpy/app/stock_screener/strategies
path1 = os.path.abspath(os.path.join(os.path.dirname(__file__), "strategies"))
self.load_strategy_class_from_folder(
path1, "vnpy.app.stock_screener.strategies")
def load_strategy_class_from_folder(self, path: str, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if filename.endswith(".py"):
strategy_module_name = ".".join(
[module_name, filename.replace(".py", "")])
elif filename.endswith(".pyd"):
strategy_module_name = ".".join(
[module_name, filename.split(".")[0]])
elif filename.endswith(".so"):
strategy_module_name = ".".join(
[module_name, filename.split(".")[0]])
else:
continue
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load/Reload strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, ScreenerTemplate) and value is not ScreenerTemplate):
class_name = value.__name__
if class_name not in self.classes:
self.write_log(f"加载策略类{module_name}.{class_name}")
else:
self.write_log(f"更新策略类{module_name}.{class_name}")
self.classes[class_name] = value
self.class_module_map[class_name] = module_name
return True
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg=msg, level=logging.CRITICAL)
return False
def load_strategy_setting(self):
"""
Load setting file.
"""
# read the strategy settings
self.strategy_setting = load_json(self.setting_filename)
for strategy_name, strategy_config in self.strategy_setting.items():
self.add_strategy(
class_name=strategy_config["class_name"],
strategy_name=strategy_name,
setting=strategy_config["setting"],
auto_init=strategy_config.get('auto_init', False),
auto_start=strategy_config.get('auto_start', False)
)
def update_strategy_setting(self, strategy_name: str, setting: dict, auto_init: bool = False,
auto_start: bool = False):
"""
Update setting file.
"""
strategy = self.strategies[strategy_name]
# previous configuration
old_config = self.strategy_setting.get('strategy_name', {})
new_config = {
| |
= False
return b_exists
def service_shutDown_check():
"""
Verifies that a docker service can be shutdown.
Should multiple jobs have been scheduled temporally serially
with the same jid/serviceName, then the actual service can
only be shut down once all identical jobs have had their
state stored.
Returns bool:
- True: can shut down
- False: cannot shut down
"""
ret = False
if int(str_hitIndex) < int(d_jobState['hits'])-1:
ret = False
else:
ret = True
return ret
def service_shutDown(d_serviceInfo):
"""
Shut down a service
"""
client = docker.from_env()
str_cmdShutDown = '%s --remove %s' % \
(d_serviceInfo['managerApp'], d_serviceInfo['serviceName'])
byte_str = client.containers.run(
'%s' % d_serviceInfo['managerImage'],
str_cmdShutDown,
volumes = {
'/var/run/docker.sock':
{
'bind': '/var/run/docker.sock',
'mode': 'rw'
}
},
remove=True)
return byte_str
d_serviceState = None
d_jobState = None
str_hitIndex = "0"
str_logs = ""
for k,v in kwargs.items():
if k == 'jobState': d_jobState = v
if k == 'serviceState': d_serviceState = v
if k == 'hitIndex': str_hitIndex = v
if k == 'logs': str_logs = v
if d_serviceState:
d_ret = self.t_status_process_state(**kwargs)
# d_ret {
# 'currentState': str_currentState,
# 'removeJob': b_removeJob,
# 'status': True
# }
if d_ret['removeJob']:
str_jobRoot = d_jobState['d_ret']['%s.container' % (str_hitIndex)]['jobRoot']
self.within.ptree.cd('/%s/container' % str_jobRoot)
d_serviceInfo = {
'serviceName': self.within.ptree.cat('manager/env/serviceName'),
'managerImage': self.within.ptree.cat('manager/image'),
'managerApp': self.within.ptree.cat('manager/app')
}
if service_exists(d_serviceInfo['serviceName']):
service_shutDown(d_serviceInfo)
return {
'status': True,
'd_process': d_ret
}
def t_status_process_container(self, *args, **kwargs):
"""
Execution should only reach this method for "container"ized jobs
status determination!
The 'd_state' contains a dictionary representation of the container
DB tree.
PRECONDITIONS:
o Only call this method if a container structure exists
in the relevant job tree!
POSTCONDITIONS:
o If the job is completed, then shutdown the container cluster
service.
o The memory container tree contains a dictionary called 'state'
that is the state returned by the container service, as well as
a file called 'logs' that is the stdout/stderr generated by the
job as it ran in the container.
"""
d_state = None
str_jobRoot = ''
str_hitIndex = "0"
str_logs = ''
for k,v in kwargs.items():
if k == 'd_state': d_state = v
if k == 'hitIndex': str_hitIndex = v
self.dp.qprint('checking on status using container...')
str_jobRoot = d_state['d_ret']['%s.container' % str_hitIndex]['jobRoot']
self.within.ptree.cd('/%s/container' % str_jobRoot)
str_serviceName = self.within.ptree.cat('manager/env/serviceName')
str_managerImage = self.within.ptree.cat('manager/image')
str_managerApp = self.within.ptree.cat('manager/app')
# Check if the state of the container service has been recorded to the data tree
if self.within.ptree.exists('state', path = '/%s/container' % str_jobRoot):
# If this exists, then the job has actually completed and
# its state has been recorded in the data tree. We can simply 'cat'
# the state from this memory dictionary
d_serviceState = self.within.ptree.cat('/%s/container/state' % str_jobRoot)
if self.within.ptree.exists('logs', path = '/%s/container' % str_jobRoot):
# The job has actually completed and its logs are recorded in the data tree
str_logs = self.within.ptree.cat('/%s/container/logs' % str_jobRoot)
else:
# Here, the manager has not been queried yet about the state of
# the service. We need to ask the container service for this
# state, and then record the state (and logs) in the memory
# tree, and then "shut down" the service.
client = docker.from_env()
# Get the state of the service...
str_cmdManager = '%s --state %s' % \
(str_managerApp, str_serviceName)
byte_str = client.containers.run(
'%s' % str_managerImage,
str_cmdManager,
volumes = {
'/var/run/docker.sock':
{
'bind': '/var/run/docker.sock',
'mode': 'rw'
}
},
remove = True)
d_serviceState = json.loads(byte_str.decode())
# Now, parse for the logs of the actual container run by the service:
# NB: This has only really been tested/used on swarm!!
b_containerIDFound = True
try:
str_contID = d_serviceState['Status']['ContainerStatus']['ContainerID']
b_containerIDFound = True
except:
b_containerIDFound = False
if b_containerIDFound:
container = client.containers.get(str_contID)
str_logs = container.logs()
str_logs = str_logs.decode()
d_ret = self.t_status_process_container_stateObject(
hitIndex = str_hitIndex,
jobState = d_state,
serviceState = d_serviceState,
logs = str_logs
)
# d_ret {
# 'status': bool,
# d_process: {
# 'currentState': str_currentState,
# 'removeJob': b_removeJob,
# 'status': True
# }
# }
return {
'status': d_ret['status'],
'logs': str_logs,
'currentState': d_ret['d_process']['currentState']
}
def t_delete_process(self,*args, **kwargs):
"""
Deletes existing jobs. Checks if container environment is OpenShift.
If yes, call t_delete_process_openshift to delete the job.
Pending implementation for other container environment.
"""
status = jid = ''
if self.container_env == 'openshift':
self.dp.qprint('Processing openshift....')
try:
d_containerStatus = self.t_delete_process_openshift(*args, **kwargs)
status = d_containerStatus['status']
jid = d_containerStatus['jid']
except Exception as e:
if getattr(e, 'reason', None) == 'Not Found':
status = e.reason
else:
raise e
d_ret = {
'action' : 'Delete Job',
'job_id' : jid,
'status' : status
}
return {
"d_ret": d_ret,
"status": status
}
def t_delete_process_openshift(self,*args, **kwargs):
"""
Delete job and related resources (pods & pvc) from OpenShift
"""
jid = status = None
for k,v in kwargs.items():
if k == 'request' and v['action'] == 'delete' : jid = v['meta']['value']
d_json = self.get_openshift_manager().state(jid)
if d_json['Status'] == 'Not Found':
status = d_json['Status']
else:
self.get_openshift_manager().remove_job(jid)
self.get_openshift_manager().remove_pvc(jid)
status = 'Job deleted successfully'
return {
"jid" : jid,
"status" : status
}
def t_status_process_openshift(self, *args, **kwargs):
"""
Determine the status of a job scheduled using the openshift manager.
PRECONDITIONS:
o Only call this method if a container structure exists
in the relevant job tree!
POSTCONDITIONS:
o If the job is completed, then shutdown the container cluster
service.
"""
self.dp.qprint('------- Processing job status within t_status_process_openshift ----------- ')
str_logs = ''
# Get job-id from request
for k,v in kwargs.items():
if k == 'request' and v['action'] == 'status' : jid = v['meta']['value']
# Query OpenShift API to get job state
d_json = self.get_openshift_manager().state(jid)
if d_json['Status']['Message'] == 'finished':
pod_names = self.get_openshift_manager().get_pod_names_in_job(jid)
for _, pod_name in enumerate(pod_names):
str_logs += self.get_openshift_manager().get_job_pod_logs(pod_name, jid)
else:
str_logs = d_json['Status']['Message']
status = d_json['Status']
currentState = d_json['Status']['Message']
return {
'status': status,
'logs': str_logs,
'currentState': [currentState]
}
def t_status_process_openshift_stateObject(self, *args, **kwargs):
"""
Process the actual JSON container return object on service
state.
PRECONDITIONS:
o This method should only ever be called by t_status_process_openshift().
POSTCONDITIONS:
o A string denoting the current state is returned.
"""
def job_exists(jid):
"""
Returns a bool:
- True: <jid> does exist
- False: <jid> does not exist
"""
b_exists = False
try:
job = self.get_openshift_manager().get_job(jid)
b_exists = True
except:
b_exists = False
return b_exists
def job_shutDown(d_serviceInfo):
"""
Shut down a service
"""
try:
self.get_openshift_manager().remove_pvc(jid)
self.get_openshift_manager().remove_job(jid)
except Exception as err:
self.dp.qprint("Error deleting pvc/job:", err)
d_serviceState = None
d_jobState = None
str_hitIndex = "0"
str_logs = ""
d_ret = {}
for k,v in kwargs.items():
if k == 'jobState': d_jobState = v
if k == 'serviceState': d_serviceState = v
if k == 'hitIndex': str_hitIndex = v
if k == 'logs': str_logs = v
if d_serviceState:
d_ret = self.t_status_process_state(**kwargs)
if d_ret['removeJob']:
str_jobRoot = d_jobState['d_ret']['%s.container' % (str_hitIndex)]['jobRoot']
self.within.ptree.cd('/%s' % str_jobRoot)
jid = self.within.ptree.cat('jid')
if job_exists(jid):
job_shutDown(jid)
return {
'status': True,
'd_process': d_ret
}
def get_openshift_manager(self):
if not self.openshiftmgr:
self.openshiftmgr = OpenShiftManager()
return self.openshiftmgr
def t_status_process_state(self, *args, **kwargs):
"""
This method processes the swarm state object to make the
final determination on a job's state and print out container
job state and logs.
It also returns a signal to the caller to trigger the removal
of the job from the swarm scheduler if the job has completed.
"""
def debug_print( str_jobRoot,
d_serviceState,
str_currentState,
str_logs
):
"""
Simply print some useful debug info.
"""
l_commsNorm = ['rx', 'rx', 'tx']
l_commsErr = ['error', 'error', 'error']
l_comms = l_commsNorm
if str_currentState == 'finishedWithError':
l_comms = l_commsErr
self.dp.qprint('\njobRoot %s\n-->%s<--...' % \
(str_jobRoot,
str_currentState),
comms = l_comms[0])
self.dp.qprint('\n%s' % self.df_print(d_serviceState),
comms = l_comms[1])
self.dp.qprint('\njob logs:\n%s' % str_logs,
comms = l_comms[2])
d_serviceState = {}
d_jobState = {}
str_hitIndex = "0"
str_logs = ""
b_status = False
str_currentState = "undefined"
for k,v in kwargs.items():
if k == 'jobState': d_jobState = v
if k == 'serviceState': d_serviceState = v
if k == 'hitIndex': str_hitIndex = v
if k == 'logs': str_logs = v
b_removeJob = False
str_jobRoot = d_jobState['d_ret']['%s.container' % (str_hitIndex)]['jobRoot']
str_state = d_serviceState['Status']['State']
str_message = d_serviceState['Status']['Message']
if str_state == 'running' and | |
# coding: utf-8
#this script is about the latest news of MENA region
#we scrape different influential media websites, or so-called fake news, lol
#and send only updates to the mailbox for daily newsletter
#in order to do that, we need a db to store all the historical content of websites
#and all the scraping techniques from html parse tree to regular expression
#over time, i also discovered the issue of information overload in daily newsletter
#hence, i invented a graph theory based algorithm to extract key information
#a part of this algo will also be featured in this script to solve info redundancy
#as u can see, this is the most advanced script in web scraping repository
#it contains almost every technique we have introduced so far
#make sure you have gone through all the other scripts before moving onto this one
import pandas as pd
from bs4 import BeautifulSoup as bs
import requests
import datetime as dt
import win32com.client as win32
import sqlite3
import os
import re
import copy
import time
os.chdir('d:/')
#this is a home made special package for text mining
#it is designed to extract key information and remove similar contents
#for details of this graph traversal algorithm plz refer to the following link
# https://github.com/je-suis-tm/graph-theory/blob/master/Text%20Mining%20project/alternative%20bfs.py
import graph
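# The real de-duplication lives in graph.remove_similar (see the repo link above).
# Below is only a self-contained sketch of the same idea, assuming a naive
# token-overlap similarity rather than the actual algorithm: build a graph in
# which similar titles share an edge, then BFS each connected component and
# keep a single representative per component.  It is illustrative only and is
# not called anywhere in this script.
def remove_similar_sketch(titles, threshold=0.5):
    """Return one representative title per cluster of similar titles (sketch only)."""
    def similar(a, b):
        ta, tb = set(a.lower().split()), set(b.lower().split())
        if not ta or not tb:
            return False
        # Jaccard overlap of the word sets
        return len(ta & tb) / len(ta | tb) >= threshold
    # adjacency list of the similarity graph
    adj = {i: [] for i in range(len(titles))}
    for i in range(len(titles)):
        for j in range(i + 1, len(titles)):
            if similar(titles[i], titles[j]):
                adj[i].append(j)
                adj[j].append(i)
    keep, visited = [], set()
    for start in range(len(titles)):
        if start in visited:
            continue
        # BFS the whole component, keep only its first member
        queue = [start]
        visited.add(start)
        keep.append(titles[start])
        while queue:
            node = queue.pop(0)
            for nxt in adj[node]:
                if nxt not in visited:
                    visited.add(nxt)
                    queue.append(nxt)
    return keep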
#main stuff
def main():
ec=scrape('https://www.economist.com/middle-east-and-africa/',economist)
aj=scrape('https://www.aljazeera.com/topics/regions/middleeast.html',aljazeera)
tr=scrape('https://www.reuters.com/news/archive/middle-east',reuters)
bc=scrape('https://www.bbc.co.uk/news/world/middle_east',bbc)
ws=scrape('https://www.wsj.com/news/types/middle-east-news',wsj)
ft=scrape('https://www.ft.com/world/mideast',financialtimes)
bb=scrape('https://www.bloomberg.com/view/topics/middle-east',bloomberg)
cn=scrape('https://edition.cnn.com/middle-east',cnn)
fo=scrape('https://fortune.com/tag/middle-east/',fortune)
#concat scraped data via append, can use pd.concat as an alternative
#unlike the previous version, current version does not sort information by source
#the purpose of blending data together is to go through text mining pipeline
df=ft
for i in [aj,tr,bc,ws,cn,fo,ec,bb]:
df=df.append(i)
#CRUCIAL!!!
#as we append dataframe together, we need to reset the index
#otherwise, we would not be able to use reindex in database function call
df.reset_index(inplace=True,drop=True)
#first round, insert into database and remove outdated information
df=database(df)
#second round, use home made package to remove similar contents
output=graph.remove_similar(df,graph.stopword)
#if the link is not correctly captured
#remove anything before www and prepend http://
for i in range(len(output)):
if 'https://' not in output['link'][i]:
temp=re.search('www',output['link'][i]).start()
output.at[i,'link']='http://'+output['link'][i][temp:]
print(output)
#using html email template
#check stripo for different templates
# https://stripo.email/templates/
html="""
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta charset="UTF-8">
<meta content="width=device-width, initial-scale=1" name="viewport">
<meta name="x-apple-disable-message-reformatting">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta content="telephone=no" name="format-detection">
<title></title>
<!--[if (mso 16)]>
<style type="text/css">
a {text-decoration: none;}
</style>
<![endif]-->
<!--[if gte mso 9]><style>sup
{ font-size: 100% !important; }</style><![endif]-->
</head>
<body>
<div class="es-wrapper-color">
<!--[if gte mso 9]>
<v:background xmlns:v="urn:schemas-microsoft-com:vml"
fill="t">
<v:fill type="tile" color="#333333"></v:fill>
</v:background>
<![endif]-->
<table class="es-content-body" width="600"
cellspacing="15" cellpadding="15" bgcolor="#ffffff"
align="center">
<tr>
<td class="esd-block-text" align="center">
<h2>Middle East</h2></td>
</tr></table>
<div><br></div>
"""
#there are a few ways for embed image in html email
#here, we use the link of the image
#it may be a lil bit slow to load the image but its the most efficient way
#alternatively, we can use mail.Attachments.add()
#we attach all images, and set <img src='cid: imagename.jpg'> (a sketch of this route follows right after main)
#the downside is that we have to scrape the website repeatedly to get images
#or we can use < img src='data:image/jpg; base64, [remove the brackets and paste base64]'/>
#but this is blocked by most email clients including outlook 2016
for i in range(len(output)):
html+="""<table class="es-content-body" width="600"
cellspacing="10" cellpadding="5" bgcolor="#ffffff"
align="center">"""
html+="""<tr><td class="esd-block-text es-p10t es-p10b"
align="center"><p><a href="%s">
<font color="#6F6F6F">%s<font><a></p></td></tr>
<tr><td align="center">
<img src="%s" width="200" height="150"/></td></tr>
<tr>"""%(output['link'][i],output['title'][i],output['image'][i])
html+="""</tr></table><div><br></div>"""
html+="""
</div>
</body>
</html>
"""
send(html)
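# Sketch of the mail.Attachments.Add() alternative mentioned inside main():
# attach each scraped image locally and reference it via <img src="cid:...">.
# The MAPI property tag used to set the content id is an assumption here, not
# something used elsewhere in this script; verify it against the MAPI docs.
def attach_image_as_cid(mail, image_path, content_id):
    """Attach a local image to an Outlook MailItem and expose it under a cid (sketch only)."""
    attachment = mail.Attachments.Add(image_path)
    # assumed proptag for PR_ATTACH_CONTENT_ID
    attachment.PropertyAccessor.SetProperty(
        "http://schemas.microsoft.com/mapi/proptag/0x3712001F", content_id)
    return '<img src="cid:%s" width="200" height="150"/>' % content_id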
#i use win32 to control outlook and send emails
#when you have a win 10 pro, it is the easiest way to do it
#cuz windows pro automatically launches outlook at startup
#otherwise, there is the standard smtplib library for plain smtp servers (a sketch follows right after send)
#supposedly authentication of corporate email would kill u
#i definitely recommend folks to use win32 library
#note that using win32.email requires outlook to stay active
#do not close the app until u actually send out the email
#win32 library uses COM api to control windows
#go to microsoft developer network
#check mailitem object model to learn how to manipulate outlook emails
#the website below is the home page of outlook vba reference
# https://msdn.microsoft.com/en-us/vba/vba-outlook
def send(html):
#create an email with recipient, subject, context and attachment
outlook = win32.Dispatch('outlook.application')
mail = outlook.CreateItem(0)
#these email addresses are fabricated, PLZ DO NOT HARASS OUR GODDESS
#just some random pornstar i love
receivers = ['<EMAIL>',
'<EMAIL>',
'<EMAIL>']
#use ';' to separate receipients
#this is a requirement of outlook
mail.To = ';'.join(receivers)
mail.Subject ='Mid East Newsletter %s'%(dt.datetime.now())
mail.BodyFormat=2
#use html to make email looks more elegant
#html is very simple
#use br for line break, b for bold fonts
#font for color and size, a href for hyperlink
#check the website below to see more html tutorials
# https://www.w3schools.com/html/
#Alternatively, we can use plain text email
#remember to use '\r\n' to jump line
#assuming html is a list of str
#the code should be mail.Body = '\r\n'.join(html)
mail.HTMLBody=html
#i usually print out everything
#need to check carefully before sending to stakeholders
#we can use mail.Display() to see the draft instead
condition=str(input('0/1 for no/yes:'))
if condition=='1':
mail.Send()
print('\nSENT')
else:
print('\nABORT')
return
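# Sketch of the smtplib route mentioned in the comments above, for setups
# without Outlook.  Host, port, sender and password below are placeholders,
# not values taken from this script.
def send_via_smtp(html, receivers, sender='newsletter@example.com',
                  host='smtp.example.com', port=587, password=''):
    """Send the html newsletter through a plain SMTP server (sketch only)."""
    import datetime
    import smtplib
    from email.mime.text import MIMEText
    msg = MIMEText(html, 'html')
    msg['Subject'] = 'Mid East Newsletter %s' % (datetime.datetime.now())
    msg['From'] = sender
    msg['To'] = ', '.join(receivers)
    with smtplib.SMTP(host, port) as server:
        server.starttls()
        server.login(sender, password)
        server.sendmail(sender, receivers, msg.as_string())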
#database insertion and output the latest feeds
#i assume you are familiar with sqlite3
#if not, plz check the following link
# https://github.com/je-suis-tm/web-scraping/blob/master/LME.py
def database(df):
temp=[]
conn = sqlite3.connect('mideast_news.db')
c = conn.cursor()
#the table structure is simple
#the table name is new
#there are three columns, title, link and image
#the data types of all of them are TEXT
#title is the primary key which forbids duplicates
for i in range(len(df)):
try:
c.execute("""INSERT INTO news VALUES (?,?,?)""",df.iloc[i,:])
conn.commit()
print('Updating...')
#the idea is very simple
#insert each line from our scraped result into database
#as the primary key has been set up
#we have non-duplicate title constraint
#insert what has already been in database would raise an error
#if so, just ignore the error and pass to the next iteration
#we can utilize the nature of database to pick out the latest information
#every successful insertion into the database also goes to the output
#at the end, output contains nothing but latest updates of websites
#that is what we call newsletter
temp.append(i)
except Exception as e:
print(e)
conn.close()
#check if the output contains no updates
if temp:
output=df.loc[[i for i in temp]]
output.reset_index(inplace=True,drop=True)
else:
output=pd.DataFrame()
output['title']=['No updates yet.']
output['link']=output['image']=['']
return output
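# The INSERT above assumes the 'news' table already exists.  A one-off setup
# matching the schema described in the comments (three TEXT columns, title as
# the primary key) could look like the sketch below; the db path mirrors the
# one used in database() and is otherwise an assumption.
def create_table_once(db_path='mideast_news.db'):
    """Create the news table if it does not exist yet (sketch only)."""
    import sqlite3
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    c.execute("""CREATE TABLE IF NOT EXISTS news
                 (title TEXT PRIMARY KEY, link TEXT, image TEXT)""")
    conn.commit()
    conn.close()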
#scraping webpages and do some etl
def scrape(url,method):
print('scraping webpage effortlessly')
time.sleep(5)
session=requests.Session()
response = session.get(url,headers={'User-Agent': 'Mozilla/5.0'})
page=bs(response.content,'html.parser',from_encoding='utf_8_sig')
df=method(page)
out=database(df)
return out
"""
the functions below are data etl of different media sources
"""
#the economist etl
def economist(page):
title,link,image=[],[],[]
df=pd.DataFrame()
prefix='https://www.economist.com'
a=page.find_all('div',class_="topic-item-container")
for i in a:
link.append(prefix+i.find('a').get('href'))
title.append(i.find('a').text)
image.append(i.parent.find('img').get('src'))
df['title']=title
df['link']=link
df['image']=image
return df
#fortune etl
def fortune(page):
title,link,image=[],[],[]
df=pd.DataFrame()
prefix='https://fortune.com'
a=page.find_all('article')
for i in a:
link.append(prefix+i.find('a').get('href'))
if 'http' in i.find('img').get('src'):
image.append(i.find('img').get('src'))
else:
image.append('')
temp=re.split('\s*',i.find_all('a')[1].text)
temp.pop()
temp.pop(0)
title.append(' '.join(temp))
df['title']=title
df['link']=link
df['image']=image
return df
#cnn etl
def cnn(page):
title,link,image=[],[],[]
df=pd.DataFrame()
prefix='https://edition.cnn.com'
a=page.find_all('div', class_='cd__wrapper')
for i in a:
title.append(i.find('span').text)
link.append(prefix+i.find('a').get('href'))
try:
image.append('https:'+i.find('img').get('data-src-medium'))
except:
image.append('')
df['title']=title
df['link']=link
df['image']=image
return df
#bloomberg etl
def bloomberg(page):
title,link,image=[],[],[]
df=pd.DataFrame()
prefix='https://www.bloomberg.com'
a=page.find_all('h1')
for i in a:
try:
link.append(prefix+i.find('a').get('href'))
title.append(i.find('a').text.replace('’','\''))
except:
pass
b=page.find_all('li')
for j in b:
try:
temp=j.find('article').get('style')
image.append( \
re.search('(?<=url\()\S*(?=\))', \
temp).group() \
)
except:
temp=j.find('article')
try:
temp2=temp.get('id')
if not temp2:
image.append('')
except:
pass
df['title']=title
df['link']=link
df['image']=image
return df
#financial times etl
def financialtimes(page):
title,link,image=[],[],[]
df=pd.DataFrame()
prefix='https://www.ft.com'
a=page.find_all('a',class_='js-teaser-heading-link')
for i in a:
link.append(prefix+i.get('href'))
temp=i.text.replace('’','\'').replace('‘','\'')
title.append(temp.replace('\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t',''))
for j in a:
temp=j.parent.parent.parent
try:
text=re.search('(?<=")\S*(?=next)',str(temp)).group()
image.append(text+'next&fit=scale-down&compression=best&width=210')
except:
image.append('')
df['title']=title
df['link']=link
df['image']=image
return df
#wall street journal etl
def wsj(page):
df=pd.DataFrame()
text=str(page)
link=re.findall('(?<=headline"> <a href=")\S*(?=">)',text)
image=re.findall('(?<=img data-src=")\S*(?=")',text)
title=[]
for i in link:
try:
temp=re.search('(?<={}")>(.*?)<'.format(i),text).group()
title.append(temp)
except:
pass
for i in range(len(title)):
title[i]=title[i].replace('’',"'").replace('<','').replace('>','')
df['title']=title
df['link']=link[:len(title)]
df['image']=image+[''] if (len(image)!=len(title)) else image
return df
#bbc etl
def bbc(page):
title,link,image=[],[],[]
df=pd.DataFrame()
prefix='https://www.bbc.co.uk'
a=page.find_all('span',class_='title-link__title-text')
for i in a:
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['AssociationArgs', 'Association']
@pulumi.input_type
class AssociationArgs:
def __init__(__self__, *,
apply_only_at_cron_interval: Optional[pulumi.Input[bool]] = None,
association_name: Optional[pulumi.Input[str]] = None,
automation_target_parameter_name: Optional[pulumi.Input[str]] = None,
calendar_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
compliance_severity: Optional[pulumi.Input['AssociationComplianceSeverity']] = None,
document_version: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
max_concurrency: Optional[pulumi.Input[str]] = None,
max_errors: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
output_location: Optional[pulumi.Input['AssociationInstanceAssociationOutputLocationArgs']] = None,
parameters: Optional[Any] = None,
schedule_expression: Optional[pulumi.Input[str]] = None,
sync_compliance: Optional[pulumi.Input['AssociationSyncCompliance']] = None,
targets: Optional[pulumi.Input[Sequence[pulumi.Input['AssociationTargetArgs']]]] = None,
wait_for_success_timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a Association resource.
:param pulumi.Input[str] association_name: The name of the association.
:param pulumi.Input[str] document_version: The version of the SSM document to associate with the target.
:param pulumi.Input[str] instance_id: The ID of the instance that the SSM document is associated with.
:param pulumi.Input[str] name: The name of the SSM document.
:param Any parameters: Parameter values that the SSM document uses at runtime.
:param pulumi.Input[str] schedule_expression: A Cron or Rate expression that specifies when the association is applied to the target.
:param pulumi.Input[Sequence[pulumi.Input['AssociationTargetArgs']]] targets: The targets that the SSM document sends commands to.
"""
if apply_only_at_cron_interval is not None:
pulumi.set(__self__, "apply_only_at_cron_interval", apply_only_at_cron_interval)
if association_name is not None:
pulumi.set(__self__, "association_name", association_name)
if automation_target_parameter_name is not None:
pulumi.set(__self__, "automation_target_parameter_name", automation_target_parameter_name)
if calendar_names is not None:
pulumi.set(__self__, "calendar_names", calendar_names)
if compliance_severity is not None:
pulumi.set(__self__, "compliance_severity", compliance_severity)
if document_version is not None:
pulumi.set(__self__, "document_version", document_version)
if instance_id is not None:
pulumi.set(__self__, "instance_id", instance_id)
if max_concurrency is not None:
pulumi.set(__self__, "max_concurrency", max_concurrency)
if max_errors is not None:
pulumi.set(__self__, "max_errors", max_errors)
if name is not None:
pulumi.set(__self__, "name", name)
if output_location is not None:
pulumi.set(__self__, "output_location", output_location)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if schedule_expression is not None:
pulumi.set(__self__, "schedule_expression", schedule_expression)
if sync_compliance is not None:
pulumi.set(__self__, "sync_compliance", sync_compliance)
if targets is not None:
pulumi.set(__self__, "targets", targets)
if wait_for_success_timeout_seconds is not None:
pulumi.set(__self__, "wait_for_success_timeout_seconds", wait_for_success_timeout_seconds)
@property
@pulumi.getter(name="applyOnlyAtCronInterval")
def apply_only_at_cron_interval(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "apply_only_at_cron_interval")
@apply_only_at_cron_interval.setter
def apply_only_at_cron_interval(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "apply_only_at_cron_interval", value)
@property
@pulumi.getter(name="associationName")
def association_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the association.
"""
return pulumi.get(self, "association_name")
@association_name.setter
def association_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "association_name", value)
@property
@pulumi.getter(name="automationTargetParameterName")
def automation_target_parameter_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "automation_target_parameter_name")
@automation_target_parameter_name.setter
def automation_target_parameter_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "automation_target_parameter_name", value)
@property
@pulumi.getter(name="calendarNames")
def calendar_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "calendar_names")
@calendar_names.setter
def calendar_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "calendar_names", value)
@property
@pulumi.getter(name="complianceSeverity")
def compliance_severity(self) -> Optional[pulumi.Input['AssociationComplianceSeverity']]:
return pulumi.get(self, "compliance_severity")
@compliance_severity.setter
def compliance_severity(self, value: Optional[pulumi.Input['AssociationComplianceSeverity']]):
pulumi.set(self, "compliance_severity", value)
@property
@pulumi.getter(name="documentVersion")
def document_version(self) -> Optional[pulumi.Input[str]]:
"""
The version of the SSM document to associate with the target.
"""
return pulumi.get(self, "document_version")
@document_version.setter
def document_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "document_version", value)
@property
@pulumi.getter(name="instanceId")
def instance_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the instance that the SSM document is associated with.
"""
return pulumi.get(self, "instance_id")
@instance_id.setter
def instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_id", value)
@property
@pulumi.getter(name="maxConcurrency")
def max_concurrency(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "max_concurrency")
@max_concurrency.setter
def max_concurrency(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "max_concurrency", value)
@property
@pulumi.getter(name="maxErrors")
def max_errors(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "max_errors")
@max_errors.setter
def max_errors(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "max_errors", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the SSM document.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="outputLocation")
def output_location(self) -> Optional[pulumi.Input['AssociationInstanceAssociationOutputLocationArgs']]:
return pulumi.get(self, "output_location")
@output_location.setter
def output_location(self, value: Optional[pulumi.Input['AssociationInstanceAssociationOutputLocationArgs']]):
pulumi.set(self, "output_location", value)
@property
@pulumi.getter
def parameters(self) -> Optional[Any]:
"""
Parameter values that the SSM document uses at runtime.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[Any]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="scheduleExpression")
def schedule_expression(self) -> Optional[pulumi.Input[str]]:
"""
A Cron or Rate expression that specifies when the association is applied to the target.
"""
return pulumi.get(self, "schedule_expression")
@schedule_expression.setter
def schedule_expression(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schedule_expression", value)
@property
@pulumi.getter(name="syncCompliance")
def sync_compliance(self) -> Optional[pulumi.Input['AssociationSyncCompliance']]:
return pulumi.get(self, "sync_compliance")
@sync_compliance.setter
def sync_compliance(self, value: Optional[pulumi.Input['AssociationSyncCompliance']]):
pulumi.set(self, "sync_compliance", value)
@property
@pulumi.getter
def targets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AssociationTargetArgs']]]]:
"""
The targets that the SSM document sends commands to.
"""
return pulumi.get(self, "targets")
@targets.setter
def targets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AssociationTargetArgs']]]]):
pulumi.set(self, "targets", value)
@property
@pulumi.getter(name="waitForSuccessTimeoutSeconds")
def wait_for_success_timeout_seconds(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "wait_for_success_timeout_seconds")
@wait_for_success_timeout_seconds.setter
def wait_for_success_timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "wait_for_success_timeout_seconds", value)
class Association(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
apply_only_at_cron_interval: Optional[pulumi.Input[bool]] = None,
association_name: Optional[pulumi.Input[str]] = None,
automation_target_parameter_name: Optional[pulumi.Input[str]] = None,
calendar_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
compliance_severity: Optional[pulumi.Input['AssociationComplianceSeverity']] = None,
document_version: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
max_concurrency: Optional[pulumi.Input[str]] = None,
max_errors: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
output_location: Optional[pulumi.Input[pulumi.InputType['AssociationInstanceAssociationOutputLocationArgs']]] = None,
parameters: Optional[Any] = None,
schedule_expression: Optional[pulumi.Input[str]] = None,
sync_compliance: Optional[pulumi.Input['AssociationSyncCompliance']] = None,
targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AssociationTargetArgs']]]]] = None,
wait_for_success_timeout_seconds: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
The AWS::SSM::Association resource associates an SSM document in AWS Systems Manager with EC2 instances that contain a configuration agent to process the document.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] association_name: The name of the association.
:param pulumi.Input[str] document_version: The version of the SSM document to associate with the target.
:param pulumi.Input[str] instance_id: The ID of the instance that the SSM document is associated with.
:param pulumi.Input[str] name: The name of the SSM document.
:param Any parameters: Parameter values that the SSM document uses at runtime.
:param pulumi.Input[str] schedule_expression: A Cron or Rate expression that specifies when the association is applied to the target.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AssociationTargetArgs']]]] targets: The targets that the SSM document sends commands to.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[AssociationArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The AWS::SSM::Association resource associates an SSM document in AWS Systems Manager with EC2 instances that contain a configuration agent to process the document.
:param str resource_name: The name of the resource.
:param AssociationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AssociationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
apply_only_at_cron_interval: Optional[pulumi.Input[bool]] = None,
association_name: Optional[pulumi.Input[str]] = None,
automation_target_parameter_name: Optional[pulumi.Input[str]] = None,
calendar_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
compliance_severity: Optional[pulumi.Input['AssociationComplianceSeverity']] = None,
document_version: Optional[pulumi.Input[str]] = None,
instance_id: Optional[pulumi.Input[str]] = None,
max_concurrency: Optional[pulumi.Input[str]] = None,
max_errors: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
output_location: Optional[pulumi.Input[pulumi.InputType['AssociationInstanceAssociationOutputLocationArgs']]] = None,
parameters: Optional[Any] = None,
schedule_expression: Optional[pulumi.Input[str]] = None,
sync_compliance: Optional[pulumi.Input['AssociationSyncCompliance']] = None,
targets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AssociationTargetArgs']]]]] = None,
wait_for_success_timeout_seconds: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AssociationArgs.__new__(AssociationArgs)
__props__.__dict__["apply_only_at_cron_interval"] = apply_only_at_cron_interval
__props__.__dict__["association_name"] = association_name
__props__.__dict__["automation_target_parameter_name"] = automation_target_parameter_name
__props__.__dict__["calendar_names"] = calendar_names
__props__.__dict__["compliance_severity"] = compliance_severity
__props__.__dict__["document_version"] = document_version
__props__.__dict__["instance_id"] = instance_id
__props__.__dict__["max_concurrency"] = max_concurrency
__props__.__dict__["max_errors"] = max_errors
__props__.__dict__["name"] = name
__props__.__dict__["output_location"] = output_location
__props__.__dict__["parameters"] = parameters
__props__.__dict__["schedule_expression"] = schedule_expression
__props__.__dict__["sync_compliance"] = sync_compliance
__props__.__dict__["targets"] = targets
__props__.__dict__["wait_for_success_timeout_seconds"] = wait_for_success_timeout_seconds
__props__.__dict__["association_id"] = None
super(Association, __self__).__init__(
'aws-native:ssm:Association',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Association':
"""
Get an existing Association resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AssociationArgs.__new__(AssociationArgs)
__props__.__dict__["apply_only_at_cron_interval"] = None
__props__.__dict__["association_id"] = None
__props__.__dict__["association_name"] = None
__props__.__dict__["automation_target_parameter_name"] = None
__props__.__dict__["calendar_names"] = None
__props__.__dict__["compliance_severity"] = None
__props__.__dict__["document_version"] = None
__props__.__dict__["instance_id"] = None
        __props__.__dict__["max_concurrency"] = None
        __props__.__dict__["max_errors"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["output_location"] = None
        __props__.__dict__["parameters"] = None
        __props__.__dict__["schedule_expression"] = None
        __props__.__dict__["sync_compliance"] = None
        __props__.__dict__["targets"] = None
        __props__.__dict__["wait_for_success_timeout_seconds"] = None
        return Association(resource_name, opts=opts, __props__=__props__)
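# Hedged usage sketch (not part of the generated module above): one way to create an
# SSM association with this resource class. The package alias "aws_native", the SSM
# document name "AWS-UpdateSSMAgent", the schedule, and the tag target are illustrative
# assumptions rather than values taken from this file.
#
# import pulumi_aws_native as aws_native
#
# example = aws_native.ssm.Association(
#     "exampleAssociation",
#     name="AWS-UpdateSSMAgent",
#     schedule_expression="rate(30 minutes)",
#     targets=[aws_native.ssm.AssociationTargetArgs(
#         key="tag:Environment",
#         values=["dev"],
#     )],
# )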
# Source: icon-lab/provoGAN, models/networks.py
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
from torch.optim import lr_scheduler
###############################################################################
# Functions
###############################################################################
def weights_init_normal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.normal(m.weight.data, 0.0, 0.02)
elif classname.find('Linear') != -1:
init.normal(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.xavier_normal(m.weight.data, gain=0.02)
elif classname.find('Linear') != -1:
init.xavier_normal(m.weight.data, gain=0.02)
elif classname.find('BatchNorm2d') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm2d') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_orthogonal(m):
classname = m.__class__.__name__
print(classname)
if classname.find('Conv') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('BatchNorm2d') != -1:
init.normal(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
print('initialization method [%s]' % init_type)
if init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
elif init_type == 'orthogonal':
net.apply(weights_init_orthogonal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == 'batch_3D':
norm_layer = functools.partial(nn.BatchNorm3d, affine=True)
elif norm_type == 'instance_3D':
norm_layer = functools.partial(nn.InstanceNorm3d, affine=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
def define_G(input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, init_type='normal', gpu_ids=[],down_samp=1):
netG = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
if which_model_netG == 'resnet_9blocks':
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids,down_samp=down_samp)
elif which_model_netG == 'resnet_6blocks':
netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6, gpu_ids=gpu_ids,down_samp=down_samp)
elif which_model_netG == 'unet_128':
netG = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'unet_256':
netG = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'unet_att':
netG = UnetGenerator_withatt(input_nc, output_nc, 5, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'resnet_9blocks_3D':
netG = ResnetGenerator_3D(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids,down_samp=down_samp)
elif which_model_netG == 'unet_att_3D':
netG = UnetGenerator_withatt_3D(input_nc, output_nc, 5, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
if len(gpu_ids) > 0:
netG.cuda(gpu_ids[0])
init_weights(netG, init_type=init_type)
return netG
def define_D(input_nc, ndf, which_model_netD,
n_layers_D=3, norm='batch', use_sigmoid=False, init_type='normal', gpu_ids=[]):
netD = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
if which_model_netD == 'basic':
netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
elif which_model_netD == 'basic_att':
netD = NLayerDiscriminator_att(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
elif which_model_netD == 'n_layers':
netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
elif which_model_netD == 'pixel':
netD = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
    elif which_model_netD == 'basic_3D':
netD = NLayerDiscriminator_3D(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
elif which_model_netD == 'basic_att_3D':
netD = NLayerDiscriminator_att_3D(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' %
which_model_netD)
if use_gpu:
netD.cuda(gpu_ids[0])
init_weights(netD, init_type=init_type)
return netD
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
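# Hedged wiring sketch for the factory functions above (define_G, define_D, print_network).
# The channel counts, model names, and empty gpu_ids are illustrative assumptions for a
# single-channel 2D setup; they are not taken from this repository's training scripts.
#
# netG = define_G(input_nc=1, output_nc=1, ngf=64, which_model_netG='resnet_9blocks',
#                 norm='instance', use_dropout=False, init_type='xavier', gpu_ids=[])
# netD = define_D(input_nc=1, ndf=64, which_model_netD='basic',
#                 norm='instance', use_sigmoid=False, init_type='xavier', gpu_ids=[])
# print_network(netG)
# print_network(netD)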
##############################################################################
# Classes
##############################################################################
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input.
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
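# Hedged usage sketch for GANLoss above: the target tensor is created lazily to match the
# discriminator output's size, so the caller never builds label tensors by hand. `netD`,
# `real_images`, and `fake_images` are assumed to be defined elsewhere and are not part of
# this file.
#
# criterion = GANLoss(use_lsgan=True)
# loss_real = criterion(netD(real_images), True)            # targets filled with 1.0
# loss_fake = criterion(netD(fake_images.detach()), False)  # targets filled with 0.0
# loss_D = 0.5 * (loss_real + loss_fake)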
# Defines the generator that consists of Resnet blocks between a few
# downsampling/upsampling operations.
# Code and idea originally from Justin Johnson's architecture.
# https://github.com/jcjohnson/fast-neural-style/
class ResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect',down_samp=1):
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
self.down_samp=down_samp
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
if down_samp==1:
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
if down_samp==1:
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
#mult = 2**n_downsampling
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.Conv2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=1,
padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class ResnetGenerator_3D(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm3d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect',down_samp=1, kernelsize_chosen=3, padsize=1):
assert(n_blocks >= 0)
super(ResnetGenerator_3D, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
self.down_samp=down_samp
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm3d
else:
use_bias = norm_layer == nn.InstanceNorm3d
model = [nn.ReplicationPad3d(3),
nn.Conv3d(input_nc, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
if down_samp==1:
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv3d(ngf * mult, ngf * mult * 2, kernel_size=kernelsize_chosen,
stride=2, padding=padsize, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
else:
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv3d(ngf * mult, ngf * mult * 2, kernel_size=kernelsize_chosen,
padding=padsize, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock_3D(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, kernelsize_chosen=kernelsize_chosen, padsize=padsize)]
if down_samp==1:
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose3d(ngf * mult, int(ngf * mult / 2),
kernel_size=kernelsize_chosen, stride=2,
padding=padsize, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
else:
#mult = 2**n_downsampling
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.Conv3d(ngf * mult, int(ngf * mult / 2),
kernel_size=kernelsize_chosen, stride=1,
padding=padsize,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReplicationPad3d(3)]
model += [nn.Conv3d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
    def forward(self, input):
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
    for name, desc in incompatible:
if getattr(args, name):
restart_option = "restart" if args.restart else "start"
_incompatible_with_restart_error(desc, restart_option)
def _check_platform_compatibility(args):
if (
(args.background or args.pidfile)
and util.get_platform() == "Windows"
and os.getenv("FORCE_RUN_IN_BACKGROUND") != "1"
):
_background_on_windows_error()
###################################################################
# Dispatch op
###################################################################
def _dispatch_op(S):
if S.args.help_model:
_print_model_help(S)
elif S.args.help_op:
_print_op_help(S)
elif S.args.test_output_scalars:
_test_output_scalars(S)
elif S.args.test_sourcecode:
_test_sourcecode(S)
elif S.args.test_flags:
_test_flags(S)
else:
_dispatch_op_cmd(S)
###################################################################
# Model / op help
###################################################################
def _print_model_help(S):
assert S.user_op._opdef
helplib.print_model_help(S.user_op._opdef.modeldef)
def _print_op_help(S):
assert S.user_op._opdef
helplib.print_op_help(S.user_op._opdef)
###################################################################
# Test output scalars
###################################################################
class TestOutputLogger(summary.TestOutputLogger):
@staticmethod
def line(line):
cli.out(line)
def pattern_no_matches(self, pattern):
msg = self._format_pattern_no_matches(pattern)
cli.out(cli.style(msg, dim=True))
def pattern_matches(self, pattern, matches, vals):
msg = self._format_pattern_matches(pattern, matches, vals)
cli.out(cli.style(msg, fg="yellow"))
def _test_output_scalars(S):
if _output_scalars_disabled(S.user_op):
cli.out("Output scalars disabled, nothing to test", err=True)
return
output_scalars = S.user_op._output_scalars or summary.DEFAULT_OUTPUT_SCALARS
input_path = S.args.test_output_scalars
logger = TestOutputLogger()
if input_path == "-" and sys.stdin.isatty():
cli.note(
"Type patterns and press Enter to test. "
"Use Ctrl-c or empty line to exit."
)
with _open_output(input_path) as f:
summary.test_output(f, output_scalars, logger)
def _open_output(path):
if path == "-":
return util.StdinReader(stop_on_blank_line=sys.stdin.isatty())
try:
return open(path, "rb")
except (IOError, OSError) as e:
if e.errno == 2:
cli.error("%s does not exist" % path)
else:
cli.error("error opening %s: %s" % (path, e))
###################################################################
# Test source code
###################################################################
def _test_sourcecode(S):
opdef = S.user_op._opdef
assert opdef
logger = _CopyLogger()
sourcecode_src = opdef.guildfile.dir
sourcecode_select = op_util.sourcecode_select_for_opdef(opdef)
op_util.copy_sourcecode(
sourcecode_src, sourcecode_select, None, handler_cls=logger.handler_cls
)
cli.out("Copying from %s" % cmd_impl_support.cwd_desc(logger.root))
cli.out("Rules:")
for rule in logger.select.rules:
cli.out(" %s" % _format_file_select_rule(rule))
if logger.select.disabled:
assert not logger.selected, logger.selected
assert not logger.skipped, logger.skipped
cli.out("Source code copy disabled")
else:
cli.out("Selected for copy:")
for path in logger.selected:
cli.out(cli.style(" %s" % path, fg="yellow"))
cli.out("Skipped:")
for path in logger.skipped:
cli.out(cli.style(" %s" % path, dim=True))
class _CopyLogger:
root = None
select = None
def __init__(self):
self.selected = []
self.skipped = []
def handler_cls(self, src_root, dest_root, select):
assert dest_root is None, dest_root
self.root = os.path.relpath(src_root)
self.select = select
return self
def copy(self, path, _results):
self.selected.append(os.path.join(self.root, path))
def ignore(self, path, _results):
self.skipped.append(os.path.join(self.root, path))
def _format_file_select_rule(rule):
parts = ["include" if rule.result else "exclude"]
if rule.type:
parts.append(rule.type)
parts.append(", ".join([repr(p) for p in rule.patterns]))
extras = _format_file_select_rule_extras(rule)
if extras:
parts.append(extras)
return " ".join(parts)
def _format_file_select_rule_extras(rule):
parts = []
if rule.regex:
parts.append("regex")
if rule.sentinel:
parts.append("with %r" % rule.sentinel)
if rule.size_gt:
parts.append("size > %s" % rule.size_gt)
if rule.size_lt:
parts.append("size < %s" % rule.size_lt)
if rule.max_matches:
parts.append("max match %s" % rule.max_matches)
return ", ".join(parts)
###################################################################
# Test flags
###################################################################
def _test_flags(S):
opdef = S.user_op._opdef
assert opdef
def out(parent, attr, indent=0):
val = getattr(parent, attr)
prefix = "%s%s:" % (" " * indent, attr.replace("_", "-"))
if val is None:
cli.out(prefix)
else:
if attr == "choices":
val = [flag_util.encode_flag_val(c.value) for c in val]
cli.out("%s %s" % (prefix, flag_util.encode_flag_val(val)))
out(opdef, "flags_dest")
out(opdef, "flags_import")
cli.out("flags:")
for f in opdef.flags:
cli.out(" %s:" % f.name)
for attr in FLAG_TEST_ATTRS:
out(f, attr, 4)
###################################################################
# Dispatch op command
###################################################################
def _dispatch_op_cmd(S):
if S.args.print_cmd:
_print_cmd(S)
elif S.args.print_env:
_print_env(S)
elif S.args.print_trials:
_print_trials(S)
elif S.args.save_trials:
_save_trials(S)
else:
_confirm_and_run(S)
###################################################################
# Print op info / save trials
###################################################################
def _print_cmd(S):
if S.batch_op:
_print_op_cmd_args(S.batch_op.cmd_args)
_print_batch_trials_cmd_args(S)
else:
_print_op_cmd_args(S.user_op.cmd_args)
def _print_op_cmd_args(args):
cli.out(" ".join([util.shlex_quote(arg) for arg in args]))
def _print_batch_trials_cmd_args(S):
_run_tmp_batch(S, {"PRINT_TRIALS_CMD": "1"})
def _run_tmp_batch(S, extra_env):
assert S.batch_op
with util.TempDir() as tmp:
_init_batch_run(S, tmp.path)
_run_op(S.batch_op, S.args, extra_env)
def _print_env(S):
_print_op_cmd_env(S.user_op.cmd_env)
def _print_op_cmd_env(env):
for name, val in sorted(env.items()):
cli.out("%s=%s" % (name, util.env_var_quote(val)))
def _print_trials(S):
if not S.batch_op:
_print_trials_for_non_batch_error()
_run_tmp_batch(S, {"PRINT_TRIALS": "1"})
def _save_trials(S):
if not S.batch_op:
_save_trials_for_non_batch_error()
path = _save_trials_path(S.args.save_trials)
cli.out("Saving trials to %s" % util.format_dir(path))
_run_tmp_batch(S, {"SAVE_TRIALS": os.path.abspath(os.path.expanduser(path))})
def _save_trials_path(save_trials_arg):
_check_trials_path(save_trials_arg)
cwd = config.cwd()
return (
os.path.join(cwd, save_trials_arg) if cwd not in (".", "") else save_trials_arg
)
def _check_trials_path(path):
_root, ext = os.path.splitext(path)
if ext.lower() not in (".csv", ".json", ""):
cli.error(
"Unsupported extension '%s' - use '.csv', '.json', or no extension" % ext
)
###################################################################
# Run
###################################################################
def _confirm_and_run(S):
if S.args.yes or _confirm_run(S):
_run(S)
# =================================================================
# Confirm op
# =================================================================
def _confirm_run(S):
prompt = (
"You are about to {action} {subject}"
"{batch_suffix}{remote_suffix}{flags_note}\n"
"{user_flags}"
"{optimizer_flags}"
"Continue?".format(
action=_preview_op_action(S),
subject=_preview_op_subject(S),
batch_suffix=_preview_batch_suffix(S),
remote_suffix=_preview_remote_suffix(S),
flags_note=_preview_flags_note(S),
user_flags=_preview_user_flags(S),
optimizer_flags=_preview_optimizer_flags(S),
)
)
return cli.confirm(prompt, default=True)
def _preview_op_action(S):
if S.args.stage:
return "stage"
elif S.args.stage_trials:
return "stage trials for"
elif S.args.restart:
return "start"
else:
return "run"
def _preview_op_subject(S):
op_desc = _fmt_opref(S.user_op.opref)
if S.restart_run:
return "%s (%s)" % (S.restart_run.id, op_desc)
else:
return op_desc
def _fmt_opref(opref):
return opref.to_opspec(config.cwd())
def _preview_batch_suffix(S):
if not S.batch_op:
return ""
return "".join(
[
_batch_desc_preview_part(S.batch_op),
_batch_qualifier_preview_part(S),
]
)
def _batch_desc_preview_part(op):
opt_name = op.opref.to_opspec(config.cwd())
if opt_name == "+":
return " as a batch"
elif opt_name in ("random", "skopt:random"):
return " with random search"
else:
return " with %s optimizer" % opt_name
def _batch_qualifier_preview_part(S):
batch_op = S.batch_op
parts = []
if batch_op.opref.op_name == "+":
parts.append(_preview_trials_count(S))
elif batch_op._max_trials:
parts.append("%i trials" % batch_op._max_trials)
if _is_likey_optimizer(batch_op) and batch_op._objective:
parts.append(_objective_preview_part(batch_op._objective))
if not parts:
return ""
return " (%s)" % ", ".join(parts)
def _preview_trials_count(S):
trials_count = _trials_count(S)
if trials_count == 1:
return "1 trial"
else:
return "%i trials" % trials_count
def _trials_count(S):
count = len(_op_trials(S.user_op))
if S.batch_op._max_trials is not None:
count = min(count, S.batch_op._max_trials)
return count
def _op_trials(op):
if op._batch_trials:
return batch_util.expand_trial_flags(
op._batch_trials, op._op_flag_vals, op._user_flag_vals, op._random_seed
)
else:
return batch_util.expand_flags(op._op_flag_vals, op._random_seed)
def _is_likey_optimizer(op):
"""Return True if op is likely an optimizer.
All operations are considered likely except those known to NOT be
optimizers. These are '+' (the general batch op) and 'random'.
Ideally the operation would indicate if it is an optimizer but
Guild doesn't support an interface for this.
"""
return op.opref.op_name not in ("+", "random")
def _objective_preview_part(obj):
if obj[:1] == "-":
return "maximize %s" % obj[1:]
else:
return "minimize %s" % obj
def _preview_remote_suffix(S):
if S.args.remote:
return " on %s" % S.args.remote
return ""
def _preview_flags_note(S):
if S.user_op._op_flag_vals and S.user_op._batch_trials:
return " (flags below used unless specified in batch trial)"
return ""
def _preview_user_flags(S):
return _preview_flags(S.user_op._op_flag_vals, S.user_op._flag_null_labels)
def _preview_flags(flag_vals, null_labels):
if not flag_vals:
return ""
return (
"\n".join(
[
" %s" % _format_flag(name, val, null_labels)
for name, val in sorted(flag_vals.items())
]
)
+ "\n"
)
def _format_flag(name, val, null_labels):
if val is None:
formatted = _null_label(name, null_labels)
else:
formatted = util.find_apply(
[_try_format_function, flag_util.encode_flag_val], val
)
return "%s: %s" % (name, formatted)
def _try_format_function(val):
if not isinstance(val, six.string_types):
return None
try:
flag_util.decode_flag_function(val)
except ValueError:
return None
else:
return val
def _null_label(name, null_labels):
null_label = null_labels.get(name, "default")
return flag_util.encode_flag_val(null_label)
def _preview_optimizer_flags(S):
if not S.batch_op or not S.batch_op._op_flag_vals:
return ""
flags_preview = _preview_flags(
S.batch_op._op_flag_vals, S.batch_op._flag_null_labels
)
preview = "Optimizer flags:\n%s" % flags_preview
return cli.style(preview, dim=True)
# =================================================================
# Run / stage
# =================================================================
def _run(S):
if S.args.remote:
_run_remote(S)
else:
_run_local(S)
def _run_remote(S):
_check_remote_script(S.user_op.opref)
remote_impl_support.run(_remote_args(S))
def _check_remote_script(opref):
if opref.pkg_type == "script":
cli.error(
"cannot run scripts remotely\n"
"Define an operation in guild.yml that uses %s as the main "
"module and run that operation instead." % opref.to_opspec(config.cwd())
)
def _remote_args(S):
params = S.args.as_kw()
params["opspec"] = S.user_op.opref.to_opspec()
if S.restart_run:
params["restart"] = S.restart_run.id
return click_util.Args(**params)
def _run_local(S):
_check_run_needed(S)
op = _init_op_for_run(S)
if S.args.stage:
_stage_op(op, S.args)
else:
_run_op(op, S.args)
def _check_run_needed(S):
if not S.args.needed:
return
matching = _remove_failed_runs(_find_matching_runs(S))
if matching:
if _restarting_match(matching, S):
_skip_needed_unchanged_flags_info()
else:
_skip_needed_matches_info(matching)
raise SystemExit(0)
def _find_matching_runs(S):
if S.batch_op:
matching = op_util.find_matching_runs(
S.batch_op.opref, S.batch_op._op_flag_vals
)
return _filter_matching_batch_runs(matching, S.user_op)
else:
return op_util.find_matching_runs(S.user_op.opref, S.user_op._op_flag_vals)
def _filter_matching_batch_runs(batch_runs, user_op):
return [
run
for run in batch_runs
if (
run.batch_proto
and op_util.is_matching_run(
run.batch_proto,
user_op.opref,
user_op._op_flag_vals,
include_pending=True,
)
)
]
def _remove_failed_runs(runs):
return [run for run in runs if run.status != "error"]
def _restarting_match(matches, S):
restart_run = S.batch_op._run if S.batch_op else S.user_op._run
return restart_run and restart_run.id in (run.id for run in matches)
def _init_op_for_run(S):
if S.batch_op:
_init_batch_run(S)
return S.batch_op
return S.user_op
def _init_batch_run(S, run_dir=None):
batch_run = oplib.init_run(S.batch_op, run_dir)
S.batch_op.run_dir = batch_run.dir
oplib.init_run(S.user_op, batch_run.guild_path("proto"))
def _stage_op(op, args):
try:
run = oplib.stage(op, continue_on_deps_error=args.force_deps)
except op_dep.OpDependencyError as e:
_op_dependency_error(e)
else:
if not args.quiet:
_print_staged_info(run)
def _print_staged_info(run):
if _is_run_outside_guild_home(run):
_print_staged_dir_instructions(run)
else:
_print_stage_pending_instructions(run)
def _is_run_outside_guild_home(run):
return not util.compare_paths(os.path.dirname(run.dir), var.runs_dir())
def _print_staged_dir_instructions(run):
cmd_args = run.get("cmd") or []
cmd = " ".join([util.shlex_quote(arg) for arg in cmd_args])
cli.out(
"{op} staged in '{dir}'\n"
"To start the operation, use "
"\"(cd '{dir}' && source .guild/ENV && {cmd})\"".format(
op=run_util.format_operation(run), dir=run.dir, cmd=cmd
)
)
def _print_stage_pending_instructions(run):
cli.out(
"{op} staged as {run_id}\n"
"To start the operation, use 'guild run --start {run_id}'".format(
op=run_util.format_operation(run), run_id=run.id
)
)
def _run_op(op, args, extra_env=None):
try:
run, exit_status = oplib.run(
op,
quiet=args.quiet,
pidfile=_op_pidfile(args),
stop_after=args.stop_after,
extra_env=extra_env,
continue_on_deps_error=args.force_deps,
)
except op_dep.OpDependencyError as e:
_op_dependency_error(e)
except oplib.ProcessError as e:
_op_process_error(op, e)
else:
_handle_run_exit(exit_status, op, run)
def _op_pidfile(args):
if args.pidfile:
return args.pidfile
elif args.background:
return util.TempFile("guild-pid-").path
else:
return None
def _handle_run_exit(exit_status, op, run):
sys.stdout.flush()
if exit_status is None:
pass
elif exit_status == 0:
_handle_run_success(op, run)
else:
        raise SystemExit(exit_status)
# coding=utf-8
from __future__ import print_function
import sys
from phi.tf.model import *
from phi.control.control import *
from phi.control.iksm import *
def ik(sim, initial_density, target_density, trainable=False):
assert not trainable
with tf.variable_scope("ik"):
optimizable_velocity = ik_resnet(initial_density, target_density, trainable=trainable, training=False, reuse=tf.AUTO_REUSE)
optimizable_velocity = optimizable_velocity.pad(0, 1, "symmetric").staggered
zeros = math.zeros_like(optimizable_velocity)
velocity = StaggeredGrid(tf.concat([optimizable_velocity[:, :, :30, :],
zeros[:, :, 30:-30, :],
optimizable_velocity[:, :, -30:, :]], axis=-2))
velocity = sim.divergence_free(velocity, accuracy=1e-3)
return velocity
def ik_resnet(initial_density, target_density, training=False, trainable=True, reuse=None):
y = tf.concat([initial_density, target_density], axis=-1)
y = tf.pad(y, [[0, 0], [0, 1 + 2 + 4 + 8 + 16 + 16], [0, 1 + 2 + 4 + 8 + 16 + 16], [0, 0]])
resolutions = [y]
for i in range(1, 6): # 1/2, 1/4, 1/8, 1/16, 1/32
y = tf.layers.conv2d(resolutions[0], 16, 2, strides=2, activation=tf.nn.relu, padding='valid',
name='downconv_%d' % i, trainable=trainable, reuse=reuse)
for j, nb_channels in zip(range(3), [16, 16, 16]):
y = residual_block(y, nb_channels, name='downrb_%d_%d' % (i, j), training=training, trainable=trainable,
reuse=reuse)
resolutions.insert(0, y)
y = tf.layers.conv2d(y, 16, 2, 1, activation=tf.nn.relu, padding='valid', name='centerconv_1', trainable=trainable,
reuse=reuse)
fc_branch = tf.layers.conv2d(y, 4, 1, 1, activation=tf.nn.relu, padding='valid', name='fc_reduce', trainable=trainable,
reuse=reuse)
fc_branch = tf.reshape(fc_branch, [-1, 64])
fc_branch = tf.layers.dense(fc_branch, 64, activation=tf.nn.relu, name='fc_dense2', trainable=trainable, reuse=reuse)
fc_branch = tf.reshape(fc_branch, [-1, 4, 4, 4])
y = tf.concat([y[..., :-4], fc_branch], axis=-1)
for j, nb_channels in zip(range(3), [16, 16, 16]):
y = residual_block(y, nb_channels, name='centerrb_%d' % j, training=training, trainable=trainable, reuse=reuse)
for i in range(1, len(resolutions)):
y = upsample2x(y)
res_in = resolutions[i][:, 0:y.shape[1], 0:y.shape[2], :]
y = tf.concat([y, res_in], axis=-1)
if i < len(resolutions) - 1:
y = tf.pad(y, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')
y = tf.layers.conv2d(y, 16, 2, 1, activation=tf.nn.relu, padding='valid', name='upconv_%d' % i,
trainable=trainable, reuse=reuse)
for j, nb_channels in zip(range(3), [16, 16, 16]):
y = residual_block(y, nb_channels, 2, name='uprb_%d_%d' % (i, j), training=training,
trainable=trainable, reuse=reuse)
else:
# Last iteration
y = tf.pad(y, [[0, 0], [0, 1], [0, 1], [0, 0]], mode='SYMMETRIC')
y = tf.layers.conv2d(y, 2, 2, 1, activation=None, padding='valid', name='upconv_%d' % i,
trainable=trainable, reuse=reuse)
return StaggeredGrid(y) # This is the velocity
class GraphBuilder(PartitioningExecutor):
def __init__(self, sim, true_densities, trainable_n, info, force_inference, ik_trainable=False):
self.sim = sim
self.true_densities = true_densities
self.trainable_n = trainable_n
self.info = info
self.force_inference = force_inference
self.ik_trainable = ik_trainable
def create_frame(self, index, step_count):
frame = PartitioningExecutor.create_frame(self, index, step_count)
frame.true = self.true_densities[index]
frame.pred = []
frame.real = None
frame.force = None
frame.prev_force = None
frame.jerk = None
frame.density = None
frame.velocity = None
frame.prev_velocity = None
if index == 0:
frame.pred = [ frame.true ]
frame.real = frame.true
frame.density = frame.true
elif index == step_count:
frame.pred = [ frame.true ]
frame.density = frame.true
return frame
def run_sm(self, n, initial_density, target_density):
with tf.variable_scope("sm%d" % n):
return sm_resnet(initial_density, target_density, trainable=n in self.trainable_n)
def run_ik(self, initial_density, target_density):
return ik(self.sim, initial_density, target_density, trainable=self.ik_trainable)
def run_advect(self, velocity, density):
return velocity.advect(density)
def run_force(self, initial_velocity, target_velocity, initial_density, real_target_density):
if self.force_inference == "forcenet":
force, self.forcenet_path = forcenet(initial_density, initial_velocity, target_velocity)
else:
next_velocity = initial_velocity.advect(initial_velocity) + self.sim.buoyancy(real_target_density)
if self.force_inference == "exact":
next_velocity = self.sim.divergence_free(next_velocity)
force = target_velocity - next_velocity
return force
def run_jerk(self, initial_velocity, initial_force, next_force):
advected_force = initial_velocity.advect(initial_force)
return next_force - advected_force
def partition(self, n, initial_frame, target_frame, center_frame):
PartitioningExecutor.partition(self, n, initial_frame, target_frame, center_frame)
center_frame.density = self.run_sm(n, initial_frame.density, target_frame.density)
center_frame.pred.append(center_frame.density)
def execute_step(self, initial_frame, target_frame):
PartitioningExecutor.execute_step(self, initial_frame, target_frame)
initial_frame.velocity = target_frame.prev_velocity = self.run_ik(initial_frame.real, target_frame.pred[-1])
target_frame.real = target_frame.density = self.run_advect(initial_frame.velocity, initial_frame.real)
if initial_frame.prev_velocity is not None:
initial_frame.force = self.run_force(initial_frame.prev_velocity, initial_frame.velocity, initial_frame.real, target_frame.real)
target_frame.prev_force = initial_frame.force
if initial_frame.prev_force is not None:
initial_frame.jerk = self.run_jerk(initial_frame.prev_velocity, initial_frame.prev_force, initial_frame.force)
def load_checkpoints(self, max_n, checkpoint_dict, preload_n):
# Force
if self.force_inference == "forcenet":
self.info("Loading ForceNet checkpoint from %s..." % self.forcenet_path)
self.sim.restore(self.forcenet_path, scope="ForceNet")
# IK
ik_checkpoint = os.path.expanduser(checkpoint_dict["IK"])
self.info("Loading IK checkpoint from %s..." % ik_checkpoint)
self.sim.restore(ik_checkpoint, scope="ik")
# SM
n = 2
while n <= max_n:
if n == max_n and not preload_n: return
checkpoint_path = None
i = n
while not checkpoint_path:
if "SM%d"%i in checkpoint_dict:
checkpoint_path = os.path.expanduser(checkpoint_dict["SM%d"%i])
else:
i //= 2
if i == n:
self.info("Loading SM%d checkpoint from %s..." % (n, checkpoint_path))
self.sim.restore(checkpoint_path, scope="sm%d" % n)
else:
self.info("Loading SM%d weights from SM%d checkpoint from %s..." % (n, i, checkpoint_path))
self.sim.restore_new_scope(checkpoint_path, "sm%d" % i, "sm%d" % n)
n *= 2
def load_all_from(self, max_n, ik_checkpoint, sm_checkpoint, sm_n):
# IK
self.info("Loading IK checkpoint from %s..." % ik_checkpoint)
self.sim.restore(ik_checkpoint, scope="ik")
# SM
n = 2
while n <= max_n:
source_n = sm_n(n) if callable(sm_n) else sm_n
self.info("Loading SM%d weights from SM%d checkpoint from %s..." % (n, source_n, sm_checkpoint))
self.sim.restore_new_scope(sm_checkpoint, "sm%d" % source_n, "sm%d" % n)
n *= 2
def lookup(self, array):
return array
def build_obstacles(sim):
sim.set_obstacle((4, 20), (60, 0)) # Left --
sim.set_obstacle((92, 4), (14, 128-22+2)) # Right |
sim.set_obstacle((4, 128-60), (4, 30)) # Bottom ------
sim.set_obstacle((38, 4), (14, 20-2)) # Left lower |
sim.set_obstacle((34, 4), (72, 20-2)) # Left upper |
# Buckets
sim.set_obstacle((10, 2), (110-5, 20-1))
sim.set_obstacle((10, 2), (110-5, 50-1))
sim.set_obstacle((10, 2), (110-5, 80-1))
sim.set_obstacle((10, 2), (110-5, 110-1))
pass
class SmokeSM(TFModel):
def __init__(self):
TFModel.__init__(self, "Refine Indirect SmokeSM %d"%n, "Slow motion density generation CNN training",
learning_rate=1e-4, data_validation_fraction=0.1,
training_batch_size=16, validation_batch_size=100,
model_scope_name="sm%d"%n)
self.info("Mode: %s" % ("Graph" if use_graph else "Inference"))
self.info("Preload: %s" % ("Yes" if preload else "No"))
self.info("Divide Strategy: %s" % divide_strategy)
self.info("Force: %s" % force_inference)
self.info("Autorun: %s" % (("Supervised" if supervised else "Unsupervised") if autorun else "No"))
self.setup()
self.info("Setting up database...")
fac = 2 if half_res else 1
for i in range(n+1):
field = BatchSelect(lambda len, i=i: range(fac*i, len-fac*n+fac*i), "Density")
if half_res:
field = transform.Downsample(field)
self.database.add("density%d"%i, augment.AxisFlip(2, field))
self.database.put_scenes(scenes('~/data/control/rising-squares'), per_scene_indices=range(n*fac+1), logf=self.info)
self.finalize_setup([f.true for f in self.sequence if f.true is not None])
build_obstacles(self.sim)
# Load previously trained models
ik_16_frame = "/home/holl/model/refine-smokeik-16/sim_000009/checkpoint_00043426"
sm_supervised_squares = "/home/holl/model/supervised-squaresm/sim_000005/checkpoint_00003810"
sm_diffphys = "/home/holl/model/refine-indirect-smokesm-16/sim_000001/checkpoint_00008806" # build on ik_16, pretrained on sm_supervised_squares
self.executor.load_all_from(n, ik_16_frame,
sm_diffphys,
lambda i: i)
self.display_time = EditableInt("Frame Display", n//2, (0, n))
self.add_field("Density (Ground Truth)", lambda: self.view_batch("density%d"%self.display_time))
self.add_field("Density (Real)", lambda: self.view(self.sequence[self.display_time].real))
self.add_field("Density (Predicted)", lambda: self.view(self.sequence[self.display_time].pred[0]))
self.add_field("Velocity", lambda: self.view(self.sequence[self.display_time].velocity))
self.add_field("Force", lambda: self.view(self.sequence[self.display_time].force))
self.add_field("Jerk", lambda: self.view(self.sequence[self.display_time].jerk))
self.add_field('Domain', self.sim.extended_fluid_mask)
if not use_graph:
self.step()
def setup(self):
# Simulation
self.sim = TFFluidSimulation([128] * 2, DomainBoundary([(False, True), (False, False)]), force_use_masks=True)
# Placeholders
true_densities = [ None ] * (n+1)
for i in [0, n//2, n]:
true_densities[i] = self.sim.placeholder(name="density%d" % i)
if use_graph:
self.executor = GraphBuilder(self.sim, true_densities, trainable_n=range(n+2), ik_trainable=False, info=self.info, force_inference=force_inference)
else:
self.executor = EagerExecutor(self.sim, true_densities, self.info, force_inference)
seq = self.sequence = get_divide_strategy(divide_strategy)(n, self.executor)
if use_graph:
self.sequence.execute()
# Density loss
self.blurred_density_diff = normalize_to(seq[-1].real, seq[-1].true) - seq[-1].true
# for i in range(3):
# self.blurred_density_diff = downsample2x(self.blurred_density_diff)
# self.blurred_density_diff = blur(self.blurred_density_diff, 4.0, cutoff=16)
self.blurred_density_diff = blur(self.blurred_density_diff, 2.0, cutoff=16)
final_density_loss = l2_loss(self.blurred_density_diff) * self.editable_float("Final_Density_Loss_Scale", 1e4) # 1e7 for 1/8 res 4px, 1e4 for 4px
self.add_scalar("Final_Density_Loss", final_density_loss)
# Force loss
force_losses = []
jerk_losses = []
for frame in seq:
if frame.force is not None:
force_losses.append(l2_loss(frame.force))
self.add_scalar("Force_%d"%frame.index, l1_loss(frame.force))
if frame.jerk is not None:
jerk_losses.append(l2_loss(frame.jerk))
self.add_scalar("Jerk_%d"%frame.index, l1_loss(frame.jerk))
force_loss = tf.add_n(force_losses) * self.editable_float("Force_Loss_Scale", 1e-2)
self.add_scalar("Total_Force_Loss", force_loss)
if jerk_losses:
jerk_loss = tf.add_n(jerk_losses) * self.editable_float("Jerk_Loss_Scale", 1e-3)
self.add_scalar("Total_Jerk_Loss", jerk_loss)
else:
jerk_loss = 0
self.unsupervised_optim = self.minimizer("Unsupervised_Loss", force_loss + final_density_loss + jerk_loss)
# Supervised loss
supervised_loss = l2_loss((seq[n//2].pred[0] - seq[n//2].true) / (0.1+spatial_sum(seq[n//2].true))) * 1e6
self.supervised_optim = self.minimizer("Supervised_Loss", supervised_loss)
def step(self):
if use_graph:
self.tfstep(self.unsupervised_optim)
else:
self.executor.set_dict(self.feed_dict(self.val_iterator, False))
self.sequence.execute()
def action_plot_sequences(self):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
self.info("Computing frames...")
real_densities = [self.executor.lookup(self.sequence[i].real) for i in range(len(self.sequence))]
pred_densities = [self.executor.lookup(self.sequence[i].pred[-1]) for i in range(len(self.sequence))]
real_velocities = [self.executor.lookup(self.sequence[i].velocity.staggered) for i in range(len(self.sequence) - 1)]
if use_graph:
data = self.view(real_densities + pred_densities + real_velocities, all_batches=True)
real_densities = data[:len(real_densities)]
pred_densities = data[len(real_densities):len(real_densities) + len(pred_densities)]
real_velocities = data[len(real_densities) + len(pred_densities):]
vmin = 0
vmax = max(np.max(real_densities), np.max(pred_densities))
np.save(self.scene.subpath("seq_pred"), np.stack(pred_densities, 1))
np.save(self.scene.subpath("seq_real"), np.stack(real_densities, 1))
np.save(self.scene.subpath("seq_vel"), np.stack(real_velocities, 1))
for batch in range(real_densities[0].shape[0]):
batchdir = os.path.join(self.get_image_dir(), "sequence_%d"%batch)
self.info("Plotting batch batch %d to %s" % (batch, batchdir))
os.mkdir(batchdir)
for i in range(len(real_densities)):
real_density = real_densities[i]
pred_density = pred_densities[i]
plt.figure(figsize=(20, 10))
# Real
plt.subplot2grid((1, 3), (0, 1))
plt.title('Real')
plt.imshow(real_density[batch, :, :, 0], interpolation="nearest", cmap="bwr", origin="lower", vmin=vmin, vmax=vmax)
# Predicted
plt.subplot2grid((1, 3), (0, 2))
plt.title('Predicted')
plt.imshow(pred_density[batch, :, :, 0], interpolation="nearest", cmap="bwr", origin="lower", vmin=vmin, vmax=vmax)
# Save file
plt.savefig(os.path.join(batchdir, "It_%d_Sequence_%d.png" % (self.time, i)))
plt.close()
self.info("Saved all sequences to %s" | |
# Source: Mousegamertank/curso-python
#000
print('Olá Mundo')
n1 = input('Insira o primeiro valor')
n2 = input('Insira o segundo valor')
print(n1 + n2)
#001
# first exercise
print('Olá, Mundo')
# or
msg = 'Olá, Mundo'
print(msg)
#-------
# print ('Olá Mundo')
print ('caso haja uma mensagem sem as aspas')
nome = 'pedro'
idade = 25
peso = 75.8
print(nome, idade, peso)
nome = input('Qual é seu nome')
idade = input('Qual a sua idade')
peso = input('Qual o seu peso')
print(nome, idade, peso)
#002
nome = input('Qual o seu nome?')
print('Olá' + nome + '! Prazer em te conhecer!')
# Python already has formatting ready out of the box, so:
#print('Olá' + nome + '! Prazer em te conhecer!')
# or
#print('Olá! Prazer em te conhecer! {}'.format(nome))
#----------------------------------
nome = input('Qual o seu nome?')
print('Olá' + nome + '! Prazer em te conhecer!')
dia = input('Informe-nos o dia de nascimento')
mes = input('Qual o mes do seu nascimento')
ano = input('Qual o ano de seu nascimento')
print('Você nasceu no dia' , dia, 'de', mes, 'de', ano, '. Correto?')
n1 = input('Informe-nos o primeiro numero')
n2 = input('Informe-nos o segundo numero')
print(n1 + n2)
#003 (challenge)
n1 = int(input('Insira o valor desejado \n'))
n2 = int(input('Insira o segundo valor desejado \n'))
print('A soma dos dois números são {}'.format(n1+n2))
#004 (challenge)
val = input('Insira o que você quiser desejado \n')
print('O tipo é: {}'.format(type(val)))
print('Retornaremos True se for verdadeiro abaixo e False se for falso \n')
print('Somente espaços: {}'.format(val.isspace()))
print('O que foi digitado é numerico: {}'.format(val.isnumeric()))
print('O que foi digitado está em Letras Maiusculas: {}'.format(val.isupper()))
print('É alpha númerico: {}'.format(val.isalpha()))
print('É decimal: {}'.format(val.isdecimal()))
print('É idenificador: {}'.format(val.isidentifier()))
#005
# integer with its successor and predecessor
n1 = int(input('Insira um número inteiro \n'))
print('O sucessor é: {} \n e o antecessor é {}'.format((n1+1), (n1-1)))
#006
# read a number and show its double, its triple, and its square root
n = float(input('Informe-nos o número \n'))
#dobro = n * 2
#triplo = n * 3
#raiz = n ** (1/2)
#print(' O dobro é {:.2f} \n O triplo é {:.2f} \n e a Raiz quadrada é {}'.format(dobro, triplo, raiz))
print(' O dobro é {:.0f} \n O triplo é {:.0f} \n e a Raiz quadrada é {:.2f}'.format((n * 2), (n * 3), (n ** (1/2))))
#007
# read two grades and show the average
n1 = float(input('Insira a primeira nota \n'))
n2 = float(input('Insira a segunda nota \n'))
#media = (n1 + n2) / 2
#print('A média é: {}'.format(media))
print('A média é: {}'.format(((n1 + n2) / 2)))
#008
# read a value in meters and show it in centimeters and millimeters
valor = float(input('Insira o valor em metros \n'))
#km = valor / 1000
#hc = valor / 100
#dc = valor / 10
#cm = valor * 100
#mm = valor * 1000
#print(' O valor em centimetros fica {} \n e o valor em milimetros fica {} '.format(cm, mm))
print(' O valor em kilometros fica {} \n e o valor em hectametros fica {} '.format((valor / 1000), (valor / 100)))
print(' O valor em centimetros fica {} \n e o valor em milimetros fica {} '.format((valor * 100), (valor * 1000)))
#009
# read an integer and show its multiplication table
n1 = int(input('Informe o número e devolveremos a tabuada'))
'''
print('A tabuada fica:')
print('{} * 1 = {}'.format(n1, n1 * 1))
print('{} * 2 = {}'.format(n1, n1 * 2))
print('{} * 3 = {}'.format(n1, n1 * 3))
print('{} * 4 = {}'.format(n1, n1 * 4))
print('{} * 5 = {}'.format(n1, n1 * 5))
print('{} * 6 = {}'.format(n1, n1 * 6))
print('{} * 7 = {}'.format(n1, n1 * 7))
print('{} * 8 = {}'.format(n1, n1 * 8))
print('{} * 9 = {}'.format(n1, n1 * 9))
print('{} * 10 = {}'.format(n1, n1 * 10))
'''
print('-' * 12)
print('{} * {:2} = {}'.format(n1, 1, n1 * 1))
print('{} * {:2} = {}'.format(n1, 2, n1 * 2))
print('{} * {:2} = {}'.format(n1, 3, n1 * 3))
print('{} * {:2} = {}'.format(n1, 4, n1 * 4))
print('{} * {:2} = {}'.format(n1, 5, n1 * 5))
print('{} * {:2} = {}'.format(n1, 6, n1 * 6))
print('{} * {:2} = {}'.format(n1, 7, n1 * 7))
print('{} * {:2} = {}'.format(n1, 8, n1 * 8))
print('{} * {:2} = {}'.format(n1, 9, n1 * 9))
print('{} * {} = {}'.format(n1, 10, n1 * 10))
print('-' * 12)
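#An alternative sketch (not part of the original exercise): the same table written with a
#for loop, assuming n1 still holds the number typed above.
#for i in range(1, 11):
#    print('{} * {:2} = {}'.format(n1, i, n1 * i))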
#010
#read an amount of money in reais and show how much it buys in dollars
din = float(input('Informe quantos reais tu tem R$ \n'))
#dol = din / 5.65
#iene = din / 0.054 Real
#euro = din / 6.61
#libras esterlinas = din / 7.30
#print('Em dolar você possui {:.2f}'.format(dol))
print('Você possui R${} em dolar você pode comprar U${:.2f}'.format(din ,(din / 5.65)))
print('Você possui R${} em dolar você pode comprar {:.2f} iene'.format(din ,(din / 0.054)))
print('Você possui R${} em dolar você pode comprar {:.2f} EU$'.format(din ,(din / 6.61)))
print('Você possui R${} em dolar você pode comprar {:.2f} libras esterlinas'.format(din ,(din / 7.30)))
#011
#read the height and width of a wall, compute the area and the paint needed, given that each can covers 2m²
alt = float(input('Insira a Altura \n'))
larg = float(input('Insira a Largura \n'))
#area = (alt * larg)
print('A quantidade de tinta a ser utilizada vai ser {}'.format((alt * larg) / 2))
#012
#Read the price of a product and apply a 5% discount
preco = float(input('Informe-nos o valor e deixaremos com 5 porcento de desconto \n R$ '))
#descon = (preco * 5) / 100
print('O valor é de :{}'.format(preco - ((preco * 5) / 100)))
#print('O valor é de: {}'.format(preco - descon))
#013
#Read the employee's salary and apply a 15% raise
sal = float(input('Informe-nos seu salario \n'))
#aumen = (sal * 15) /100
#print('Seu novo salário é: {}'.format(sal + aumen))
print('Seu novo salário é: {}'.format(sal + ((sal * 15) /100)))
#014
#read the temperature in degrees Celsius and convert it to Fahrenheit
c = float(input('Informe o valor em graus celsius'))
f = (c * 9/5) + 32
print('O modelo de resposta de {} celsius é: {}'.format(c, f))
#015
#Ask for the number of km driven and the rental days, where each day costs R$ 60.00 and each km costs R$ 0.15
dia = int(input('Informe-nos a quantidade de dias alugado \n'))
km = float(input('Quantos Km Rodados \n'))
preco = (60 * dia) + (km * 0.15)
print('O total a se pagar foi de: R${:.2f}'.format(preco))
#016
# Read any number from the keyboard and show its integer part
from math import trunc
x = float(input('Informe-nos o valor desejado \n'))
print('O valor inteiro é de: {}'.format(trunc(x)))
#
num = float(input('Digite um valor'))
print('O valor digitado foi {} e a sua porção inteira foi de {}'.format(num, int(num)))
#017
#Program that reads the lengths of the opposite and adjacent legs of a right triangle and shows the length of the hypotenuse
co = float(input('cateto oposto \n'))
ca = float(input('cateto adjacente \n'))
hip = ((co**2) + (ca**2)) **(1/2)
print('A hipotenusa vai ser: {:.2f}'.format(hip))
import math
co = float(input('cateto oposto \n'))
ca = float(input('cateto adjacente \n'))
hi = math.hypot(co, ca)
print('A hipotenusa vai medir {:.2f}'.format(hi))
#018
#Program that reads any angle and shows the sine, cosine and tangent of that angle
import math
angu = float(input('Informe-nos o angulo \n'))
rad = math.radians(angu)
sen = math.sin(rad)
cos = math.cos(rad)
tan = math.tan(rad)
print('O angulo de {} tem o Seno de {:.2f}'.format(angu, sen))
print('O angulo de {} tem o Cosseno de {:.2f}'.format(angu, cos))
print('O angulo de {} tem a Tangente de {:.2f}'.format(angu, tan))
#019
#pick 4 students at random to erase the board: a program that helps the teacher, reading the names and printing the chosen one
import random
alunos = []
for i in range(4):
alunos.append(input('Informe-nos o nome do aluno \n'))
print('O aluno que vai apagar a lousa é {}'.format(random.choice(alunos)))
#\\\\\\\\\\\\\
aluno1 = input('1° aluno \n')
aluno2 = input('2° aluno \n')
aluno3 = input('3° aluno \n')
aluno4 = input('4° aluno \n')
print('O aluno que vai apagar a lousa é {}'.format(random.choice([aluno1, aluno2, aluno3, aluno4])))
#020
#Take the previous program and shuffle the students randomly, showing the resulting order
import random
alunos = []
for i in range(4):
alunos.append(input('Informe-nos o nome do aluno \n'))
random.shuffle(alunos)
print('O aluno que vai apagar a lousa é {}'.format(alunos))
from random import shuffle
alunos = []
for i in range(4):
alunos.append(input('Informe-nos o nome do aluno \n'))
shuffle(alunos)
print('O aluno que vai apagar a lousa é {}'.format(alunos))
#021
#write a program that plays an MP3 file
# To do at home
#022
#read the user's full name and show:
#the name in upper case, in lower case, how many letters it has (ignoring spaces), and how many letters the first name has
nome = str(input('Informe o seu nome \n')).strip()
print('o nome em maiuculos vai ser {}'.format(nome.upper()))
print('o nome em minusculos vai ser {}'.format(nome.lower()))
print('o nome em quantas letras sem espaços vai ser {}'.format(len(nome)- nome.count(' ')))
#print('o nome em letras no primeiro nome vai ser {}'.format(nome.find(' ')))
separa = nome.split()
print('Seu primeiro nome é {} e ele tem {} letras'.format(separa[0], len(separa[0])))
#023
#read a number from 0 to 9999 and show each of its digits separately, e.g. 1834
#units: 4; tens: 3; hundreds: 8; thousands: 1
# The color ID associated with this communicator
self._color = None
# The group ID associated with the color
self._group = None
def _is_ndarray(self, obj):
"""
Helper function to determine if an object is a Numpy NDArray.
Parameters:
obj: The object to be tested
Returns:
bool: True if the object is a Numpy NDArray. False otherwise,
or if the Numpy module was not found during
the SimpleComm constructor.
Examples:
>>> _is_ndarray(1)
False
>>> alist = [1, 2, 3, 4]
>>> _is_ndarray(alist)
False
>>> aarray = numpy.array(alist)
>>> _is_ndarray(aarray)
True
"""
if self._numpy:
return isinstance(obj, self._numpy.ndarray)
else:
return False
def get_size(self):
"""
Get the integer number of ranks in this communicator.
The size includes the 'manager' rank.
Returns:
int: The integer number of ranks in this communicator.
"""
return 1
def get_rank(self):
"""
Get the integer rank ID of this MPI process in this communicator.
This call can be made independently from other ranks.
Returns:
int: The integer rank ID of this MPI process
"""
return 0
def is_manager(self):
"""
Check if this MPI process is on the 'manager' rank (i.e., rank 0).
This call can be made independently from other ranks.
Returns:
bool: True if this MPI process is on the manager rank
(i.e., rank 0). False otherwise.
"""
return self.get_rank() == 0
def get_color(self):
"""
Get the integer color ID of this MPI process in this communicator.
By default, a communicator's color is None, but a communicator can
be divided into color groups using the 'divide' method.
This call can be made independently from other ranks.
Returns:
int: The color of this MPI communicator
"""
return self._color
def get_group(self):
"""
Get the group ID of this MPI communicator.
The group ID is the argument passed to the 'divide' method, and it
represents a unique identifier for all ranks in the same color group.
It can be any type of object (e.g., a string name).
This call can be made independently from other ranks.
Returns:
The group ID of this communicator
"""
return self._group
def sync(self):
"""
Synchronize all MPI processes at the point of this call.
Immediately after this method is called, you can guarantee that all
ranks in this communicator will be synchronized.
This call must be made by all ranks.
"""
return
def allreduce(self, data, op):
"""
Perform an MPI AllReduction operation.
The data is "reduced" across all ranks in the communicator, and the
result is returned to all ranks in the communicator. (Reduce
operations such as 'sum', 'prod', 'min', and 'max' are allowed.)
This call must be made by all ranks.
Parameters:
data: The data to be reduced
op (str): A string identifier for a reduce operation (any string
found in the OPERATORS list)
Returns:
The single value constituting the reduction of the input data.
(The same value is returned on all ranks in this communicator.)
"""
if isinstance(data, dict):
totals = {}
for k, v in data.items():
totals[k] = SimpleComm.allreduce(self, v, op)
return totals
elif self._is_ndarray(data):
return SimpleComm.allreduce(self, getattr(self._numpy, _OP_MAP[op]['np'])(data), op)
elif hasattr(data, '__len__'):
return SimpleComm.allreduce(self, eval(_OP_MAP[op]['py'])(data), op)
else:
return data
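# Illustrative sketch (not part of the original module), assuming a serial SimpleComm
# instance `comm` and the usual operator mapping in _OP_MAP: dicts are reduced per key,
# and sequences are first reduced locally with the matching Python/Numpy operator.
#   comm.allreduce({'a': [1, 2, 3], 'b': 4}, 'sum')   # -> {'a': 6, 'b': 4}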
def partition(self, data=None, func=None, involved=False, tag=0):
"""
Partition and send data from the 'manager' rank to 'worker' ranks.
By default, the data is partitioned using an "equal stride" across the
data, with the stride equal to the number of ranks involved in the
partitioning. If a partition function is supplied via the `func`
argument, then the data will be partitioned across the 'worker' ranks,
giving each 'worker' rank a different part of the data according to
the algorithm used by the partition function supplied.
If the `involved` argument is True, then a part of the data (as
determined by the given partition function, if supplied) will be
returned on the 'manager' rank. Otherwise, ('involved' argument is
False) the data will be partitioned only across the 'worker' ranks.
This call must be made by all ranks.
Keyword Arguments:
data: The data to be partitioned across the ranks in the
communicator.
func: A PartitionFunction object/function that returns a part
of the data given the index and assumed size of the partition.
involved (bool): True if a part of the data should be given to the
'manager' rank in addition to the 'worker' ranks. False
otherwise.
tag (int): A user-defined integer tag to uniquely specify this
communication message.
Returns:
A (possibly partitioned) subset (i.e., part) of the data. Depending
on the PartitionFunction used (or if it is used at all), this method
may return a different part on each rank.
"""
op = func if func else lambda *x: x[0][x[1] :: x[2]]
if involved:
return op(data, 0, 1)
else:
return None
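# Illustrative sketch (assumed serial SimpleComm instance `comm`): with the default
# "equal stride" partition, the serial manager keeps the whole sequence when it is
# involved, and gets nothing back otherwise.
#   comm.partition(data=list(range(6)), involved=True)   # -> [0, 1, 2, 3, 4, 5]
#   comm.partition(data=list(range(6)))                  # -> None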
def ration(self, data=None, tag=0):
"""
Send a single piece of data from the 'manager' rank to a 'worker' rank.
If this method is called on a 'worker' rank, the worker will send a
"request" for data to the 'manager' rank. When the 'manager' receives
this request, the 'manager' rank sends a single piece of data back to
the requesting 'worker' rank.
For each call to this function on a given 'worker' rank, there must
be a matching call to this function made on the 'manager' rank.
NOTE: This method cannot be used for communication between the
'manager' rank and itself. Attempting this will cause the code to
hang.
Keyword Arguments:
data: The data to be asynchronously sent to the 'worker' rank
tag (int): A user-defined integer tag to uniquely specify this
communication message
Returns:
On the 'worker' rank, the data sent by the manager. On the
'manager' rank, None.
Raises:
RuntimeError: If executed during a serial or 1-rank parallel run
"""
err_msg = 'Rationing cannot be used in serial operation'
raise RuntimeError(err_msg)
def collect(self, data=None, tag=0):
"""
Send data from a 'worker' rank to the 'manager' rank.
If the calling MPI process is the 'manager' rank, then it will
receive and return the data sent from the 'worker'. If the calling
MPI process is a 'worker' rank, then it will send the data to the
'manager' rank.
For each call to this function on a given 'worker' rank, there must
be a matching call to this function made on the 'manager' rank.
NOTE: This method cannot be used for communication between the
'manager' rank and itself. Attempting this will cause the code to
hang.
Keyword Arguments:
data: The data to be collected asynchronously on the manager rank.
tag (int): A user-defined integer tag to uniquely specify this
communication message
Returns:
On the 'manager' rank, a tuple containing the source rank ID
and the data collected. None on all other ranks.
Raises:
RuntimeError: If executed during a serial or 1-rank parallel run
"""
err_msg = 'Collection cannot be used in serial operation'
raise RuntimeError(err_msg)
def divide(self, group):
"""
Divide this communicator's ranks into groups.
Creates and returns two (2) kinds of groups:
1. groups with ranks of the same color ID but different rank IDs
(called a "monocolor" group), and
2. groups with ranks of the same rank ID but different color IDs
(called a "multicolor" group).
Parameters:
group: A unique group ID to which will be assigned an integer
color ID ranging from 0 to the number of group ID's
supplied across all ranks
Returns:
tuple: A tuple containing (first) the "monocolor" SimpleComm for
ranks with the same color ID (but different rank IDs) and
(second) the "multicolor" SimpleComm for ranks with the same
rank ID (but different color IDs)
Raises:
RuntimeError: If executed during a serial or 1-rank parallel run
"""
err_msg = 'Division cannot be done on a serial communicator'
raise RuntimeError(err_msg)
class SimpleCommMPI(SimpleComm):
"""
Simple Communicator using MPI.
Attributes:
PART_TAG: Partition Tag Identifier
RATN_TAG: Ration Tag Identifier
CLCT_TAG: Collect Tag Identifier
REQ_TAG: Request Identifier
MSG_TAG: Message Identifier
ACK_TAG: Acknowledgement Identifier
PYT_TAG: Python send/recv Identifier
NPY_TAG: Numpy send/recv Identifier
_mpi: A reference to the mpi4py MPI module
instance. If the replacement is done via a callable it'll use it like
unlinking and directly replace the link with the text itself. It only
supports unicode when used by the callable and bytes are not allowed.
If either the section or label should be used the replacement can be a
function which returns a Link instance and copies the value which should
remain.
.. versionchanged:: 7.0
`site` parameter is mandatory
:param text: the text in which to replace links
:param replace: either a callable which reacts like described above.
The callable must accept four parameters link, text, groups, rng and
allows for user interaction. The groups are a dict containing 'title',
'section', 'label' and 'linktrail' and the rng are the start and end
position of the link. The 'label' in groups contains everything after
the first pipe which might contain additional data which is used in
File namespace for example.
Alternatively it can be a sequence containing two items where the first
must be a Link or Page and the second has almost the same meaning as
the result by the callable. It'll convert that into a callable where
the first item (the Link or Page) has to be equal to the found link and
in that case it will apply the second value from the sequence.
:type replace: sequence of pywikibot.Page/pywikibot.Link/str or
callable
:param site: a Site object to use. It should match the origin or
target site of the text
:raises TypeError: missing positional argument 'site'
:raises ValueError: Wrong site type
:raises ValueError: Wrong replacement number
:raises ValueError: Wrong replacement types
"""
def to_link(source):
"""Return the link from source when it's a Page otherwise itself."""
if isinstance(source, pywikibot.Page):
return source._link
if isinstance(source, str):
return pywikibot.Link(source, site)
return source
def replace_callable(link, text, groups, rng):
if replace_list[0] == link:
return replace_list[1]
return None
def check_classes(replacement):
"""Normalize the replacement into a list."""
if not isinstance(replacement, (pywikibot.Page, pywikibot.Link)):
raise ValueError('The replacement must be None, False, '
'a sequence, a Link or a str but '
'is "{}"'.format(type(replacement)))
def title_section(link) -> str:
title = link.title
if link.section:
title += '#' + link.section
return title
if not isinstance(site, pywikibot.site.BaseSite):
raise ValueError('The "site" argument must be a BaseSite not {}.'
.format(type(site).__name__))
if isinstance(replace, Sequence):
if len(replace) != 2:
raise ValueError('When used as a sequence, the "replace" '
'argument must contain exactly 2 items.')
replace_list = [to_link(replace[0]), replace[1]]
if not isinstance(replace_list[0], pywikibot.Link):
raise ValueError(
'The original value must be either str, Link or Page '
'but is "{}"'.format(type(replace_list[0])))
if replace_list[1] is not False and replace_list[1] is not None:
if isinstance(replace_list[1], str):
replace_list[1] = pywikibot.Page(site, replace_list[1])
check_classes(replace_list[0])
replace = replace_callable
linktrail = site.linktrail()
link_pattern = re.compile(
r'\[\[(?P<title>.*?)(#(?P<section>.*?))?(\|(?P<label>.*?))?\]\]'
r'(?P<linktrail>{})'.format(linktrail))
extended_label_pattern = re.compile(r'(.*?\]\])({})'.format(linktrail))
linktrail = re.compile(linktrail)
curpos = 0
# This loop will run until we have finished the current page
while True:
m = link_pattern.search(text, pos=curpos)
if not m:
break
# Ignore links to sections of the same page
if not m.group('title').strip():
curpos = m.end()
continue
# Ignore interwiki links
if (site.isInterwikiLink(m.group('title').strip())
and not m.group('title').strip().startswith(':')):
curpos = m.end()
continue
groups = m.groupdict()
if groups['label'] and '[[' in groups['label']:
# TODO: Work on the link within the label too
# A link within a link, extend the label to the ]] after it
extended_match = extended_label_pattern.search(text, pos=m.end())
if not extended_match:
# TODO: Unclosed link label, what happens there?
curpos = m.end()
continue
groups['label'] += groups['linktrail'] + extended_match.group(1)
groups['linktrail'] = extended_match.group(2)
end = extended_match.end()
else:
end = m.end()
start = m.start()
# From this point on, the m variable shouldn't be used as it may not
# contain all contents
del m
try:
link = pywikibot.Link.create_separated(
groups['title'], site, section=groups['section'],
label=groups['label'])
except (SiteDefinitionError, InvalidTitleError):
# unrecognized iw prefix or invalid title
curpos = end
continue
# Check whether the link found should be replaced.
# Either None, False or tuple(Link, bool)
new_link = replace(link, text, groups.copy(), (start, end))
if new_link is None:
curpos = end
continue
# The link looks like this:
# [[page_title|new_label]]new_linktrail
page_title = groups['title']
new_label = groups['label']
if not new_label:
# or like this: [[page_title]]new_linktrail
new_label = page_title
# remove leading ":" from the link text
if new_label[0] == ':':
new_label = new_label[1:]
new_linktrail = groups['linktrail']
if new_linktrail:
new_label += new_linktrail
if new_link is False:
# unlink - we remove the section if there's any
assert isinstance(new_label, str), 'link text must be str.'
new_link = new_label
if isinstance(new_link, str):
# Nothing good can come out of the fact that bytes is returned so
# force unicode
text = text[:start] + new_link + text[end:]
# Make sure that next time around we will not find this same hit.
curpos = start + len(new_link)
continue
if isinstance(new_link, bytes):
raise ValueError('The result must be str and not bytes.')
# Verify that it's either Link, Page or str
check_classes(new_link)
# Use section and label if it's a Link and not otherwise
if isinstance(new_link, pywikibot.Link):
is_link = True
else:
new_link = new_link._link
is_link = False
new_title = new_link.canonical_title()
# Make correct langlink if needed
if new_link.site != site:
new_title = ':' + new_link.site.code + ':' + new_title
if is_link:
# Use link's label
new_label = new_link.anchor
must_piped = new_label is not None
new_section = new_link.section
else:
must_piped = True
new_section = groups['section']
if new_section:
new_title += '#' + new_section
if new_label is None:
new_label = new_title
# Parse the link text and check if it points to the same page
parsed_new_label = pywikibot.Link(new_label, new_link.site)
try:
parsed_new_label.parse()
except InvalidTitleError:
pass
else:
parsed_link_title = title_section(parsed_new_label)
new_link_title = title_section(new_link)
# compare title, but only with parts if linktrail works
if not linktrail.sub('',
parsed_link_title[len(new_link_title):]):
# TODO: This must also compare everything that was used as a
# prefix (in case insensitive)
must_piped = (
not parsed_link_title.startswith(new_link_title)
or parsed_new_label.namespace != new_link.namespace)
if must_piped:
new_text = '[[{}|{}]]'.format(new_title, new_label)
else:
new_text = '[[{}]]{}'.format(new_label[:len(new_title)],
new_label[len(new_title):])
text = text[:start] + new_text + text[end:]
# Make sure that next time around we will not find this same hit.
curpos = start + len(new_text)
return text
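# Illustrative usage sketch (hypothetical page title): unlink every link that points to
# "Old title" by passing a (source, replacement) pair, where False means "unlink".
#   site = pywikibot.Site('en', 'wikipedia')
#   new_text = replace_links(text, (pywikibot.Page(site, 'Old title'), False), site)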
def add_text(text: str, add: str, *, site=None) -> str:
"""Add text to a page content above categories and interwiki.
.. versionadded:: 6.4
:param text: The page content to add text to.
:param add: Text to add.
:param site: The site that the text is coming from. Required for
reorder of categories and interlanguage links. The default site
is used otherwise.
:type site: pywikibot.Site
"""
# Translating the \\n (e.g. from command line) into binary \n
add = add.replace('\\n', '\n')
# Getting the categories
categories_inside = getCategoryLinks(text, site)
# Deleting the categories
text = removeCategoryLinks(text, site)
# Getting the interwiki
interwiki_inside = getLanguageLinks(text, site)
# Removing the interwiki
text = removeLanguageLinks(text, site)
# Adding the text
text += '\n' + add
# Reputting the categories
text = replaceCategoryLinks(text, categories_inside, site, addOnly=True)
# Adding the interwiki
return replaceLanguageLinks(text, interwiki_inside, site)
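# Illustrative usage sketch (assumed site object, hypothetical template): the added text
# ends up above the category and interlanguage links, which are re-appended after it.
#   site = pywikibot.Site('en', 'wikipedia')
#   new_text = add_text(old_text, '{{Some template}}', site=site)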
# -------------------------------
# Functions dealing with sections
# -------------------------------
_Heading = namedtuple('_Heading', ('text', 'start', 'end'))
_Section = namedtuple('_Section', ('title', 'content'))
_Content = namedtuple('_Content', ('header', 'sections', 'footer'))
def _extract_headings(text: str, site) -> list:
"""Return _Heading objects."""
headings = []
heading_regex = _get_regexes(['header'], site)[0]
for match in heading_regex.finditer(text):
start, end = match.span()
if not isDisabled(text, start) and not isDisabled(text, end):
headings.append(_Heading(match.group(), start, end))
return headings
def _extract_sections(text: str, headings) -> list:
"""Return _Section objects."""
if headings:
# Assign them their contents
contents = []
for i, heading in enumerate(headings):
try:
next_heading = headings[i + 1]
except IndexError:
contents.append(text[heading.end:])
else:
contents.append(text[heading.end:next_heading.start])
return [_Section(heading.text, content)
for heading, content in zip(headings, contents)]
return []
def extract_sections(
text: str, site=None
) -> NamedTuple('_Content', [('header', str), # noqa: F821
('body', List[Tuple[str, str]]), # noqa: F821
('footer', str)]): # noqa: F821
"""
Return section headings and contents found in text.
:return: The returned namedtuple contains the text parsed into
header, contents and footer parts: The header part is a string
containing text part above the first heading. The footer | |
# coding=utf-8
#
# Copyright (c) 2013-2015 First Flamingo Enterprise B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TAMission.py
# firstflamingo/treinenaapje
#
# Created by <NAME> on 21-Feb-13.
#
import json, logging, random
from google.appengine.ext import db
from datetime import datetime, time, timedelta
from ffe import config
from ffe.gae import increase_counter, issue_tasks
from ffe.markup import XMLElement
from ffe.ffe_time import now_cet, mark_cet
from TABasics import TAModel
from TAStop import TAStop, StopStatuses, repr_list_from_stops
# ========== Mission Model ==========================================================================
class MissionStatuses:
inactive, announced, running, arrived, completed, canceled, ambivalent = range(7)
s = ['inactive', 'announced', 'running', 'arrived', 'completed', 'canceled', 'ambivalent']
class ODIDsProperty(db.TextProperty):
def validate(self, value):
return value
def get_value_for_datastore(self, model_instance):
dictionary = super(ODIDsProperty, self).get_value_for_datastore(model_instance)
if dictionary and len(dictionary) > 3:
dictionary = optimize_od_ids(dictionary)
serialized = json.dumps(dictionary)
return db.Text(serialized)
def make_value_from_datastore(self, value):
dictionary = json.loads(str(value))
return super(ODIDsProperty, self).make_value_from_datastore(dictionary)
class StopsListProperty(db.TextProperty):
def validate(self, value):
return value
def get_value_for_datastore(self, model_instance):
stopsList = super(StopsListProperty, self).get_value_for_datastore(model_instance)
serialized = json.dumps(repr_list_from_stops(stopsList))
return db.Text(serialized)
def make_value_from_datastore(self, value):
reprList = json.loads(str(value))
stopsList = []
for repr in reprList:
stopsList.append(TAStop.fromRepr(repr))
return super(StopsListProperty, self).make_value_from_datastore(stopsList)
class TAMission(TAModel):
# Stored attributes:
series_id = db.StringProperty()
_odIDsDictionary = ODIDsProperty()
nominalDate = db.DateProperty(indexed=False)
offset = db.TimeProperty(indexed=False)
_stops = StopsListProperty()
# Transient attributes
_number = None
delay = 0.0
delay_update_limit = None
needs_datastore_put = False
issue_time = None
tasks = None
# ------ Object lifecycle ---------------------------------------------
def awake_from_create(self):
series = self.series
if self.supplementary:
original = self.original_mission
if original:
self.offset = original.offset
if series:
series.add_mission(self)
# ------ Mission identity ---------------------------------------------
@property
def number(self):
if self._number is None:
self._number = int(self.code)
return self._number
@property
def base_number(self):
return self.number % 100000
@property
def ordinal(self):
if self.country == 'eu':
return self.number % 10
else:
return self.number % 100
@property
def up(self):
return self.number % 2
# ------ Defining the mission -----------------------------------------
@property
def offset_time(self):
if self.offset:
return self.offset
else:
return time(0)
@offset_time.setter
def offset_time(self, value):
if value != self.offset:
self.offset = value
if not self.supplementary:
self.needs_datastore_put = True
@property
def offset_cet(self):
return mark_cet(datetime.combine(self.nominalDate, self.offset_time))
@offset_cet.setter
def offset_cet(self, value):
rounded_time = round_mission_offset(value)
self.nominalDate = rounded_time.date()
self.offset = rounded_time.time()
if not self.supplementary:
self.needs_datastore_put = True
@property
def offset_string(self):
if self.offset is None:
return '-'
else:
return self.offset.strftime('%H:%M')
@property
def date_string(self):
if self.nominalDate is None:
return '-'
else:
return self.nominalDate.strftime('%d-%m-%Y')
@offset_string.setter
def offset_string(self, value):
if value == '-':
new_offset = None
else:
dt = mark_cet(datetime.strptime(value, '%H:%M'))
new_offset = dt.time()
self.offset_time = new_offset
@property
def weekday(self):
if self.nominalDate in config.OFFICIAL_HOLIDAYS:
return 6
return self.nominalDate.weekday()
@property
def supplementary(self):
return self.number // 100000
@property
def original_mission(self):
if self.supplementary:
return TAMission.get('%s.%d' % (self.country, self.base_number))
else:
return self
# ------ Connecting to series -----------------------------------------
@property
def series_number(self):
if self.country == 'eu':
return self.base_number // 10
else:
return self.base_number // 100
@property
def series(self):
if self.series_id == 'orphan':
return None
if not self.series_id:
nr = int(self.number % 1E5)
if self.country == 'eu':
self.series_id = config.INTERNATIONAL_SERIES.get(nr//10, 'eu.000')
else:
self.series_id = '%s.%03d' % (self.country, nr // 100)
series = TAModel.get(self.series_id, class_name='TASeries')
if not series:
self.series_id = 'orphan'
return series
# ------ Managing origin/destination IDs ----------------------------
@property
def odIDs_dictionary(self):
if not self._odIDsDictionary:
self._odIDsDictionary = {'d': [None, None]}
return self._odIDsDictionary
@odIDs_dictionary.setter
def odIDs_dictionary(self, new_dict):
if new_dict != self._odIDsDictionary:
self._odIDsDictionary = new_dict
self.needs_datastore_put = True
def get_odIDs_for_weekday(self, weekday):
key = str(weekday)
return self.odIDs_dictionary.get(key, self.odIDs_dictionary['d'])
def set_odIDs_for_weekday(self, weekday, new_ids):
if self.get_odIDs_for_weekday(weekday) != new_ids:
self.odIDs_dictionary[str(weekday)] = new_ids
self.needs_datastore_put = True
@property
def odIDs(self):
return self.get_odIDs_for_weekday(self.weekday)
@odIDs.setter
def odIDs(self, newIDs):
self.set_odIDs_for_weekday(self.weekday, newIDs)
def get_odIDs_string(self, weekday):
ids = self.get_odIDs_for_weekday(weekday)
if ids[0] is None:
return '-'
else:
return '%s-%s' % (ids[0], ids[1])
def set_odIDs_string(self, weekday, string):
if string == '-':
ids = [None, None]
else:
comps = string.split('-')
ids = [comps[0], comps[1]]
self.set_odIDs_for_weekday(weekday, ids)
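# Illustrative sketch (hypothetical station IDs): weekday-specific origin/destination
# pairs fall back to the default entry 'd' when no override exists for that weekday.
#   mission.set_odIDs_string(5, 'asd-gvc')    # override for weekday 5
#   mission.get_odIDs_for_weekday(5)          # -> ['asd', 'gvc']
#   mission.get_odIDs_for_weekday(2)          # -> the default pair stored under 'd'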
@property
def origin_id(self):
return self.odIDs[0]
@origin_id.setter
def origin_id(self, new_id):
self.odIDs = [new_id, self.destination_id]
@property
def destination_id(self):
return self.odIDs[1]
@destination_id.setter
def destination_id(self, new_id):
self.odIDs = [self.origin_id, new_id]
def optimize_odIDs_dictionary(self):
self.odIDs_dictionary = optimize_od_ids(self.odIDs_dictionary)
# ------ Providing output -----------------------------------------
@property
def stops_repr(self):
return repr_list_from_stops(self.stops)
@property
def repr(self):
return {'id': self.id, 'stops': self.stops_repr}
@property
def xml(self):
array = []
for key, value in self.odIDs_dictionary.iteritems():
array.append(XMLElement('od', {'day': key, 'from': value[0], 'to': value[1]}, []))
return XMLElement('mission', {'id': self.id, 'offset': self.offset_time.strftime('%H:%M')}, array)
# ------ Connecting to ScheduledPoint ----------------------------------------
def create_stop_from_point(self, point):
stop = TAStop()
stop.station_id = point.station_id
stop.mission_id = self.id
stop.status = StopStatuses.planned
stop.arrival = self.offset_cet + self.arrival_delta_with_point(point)
stop.departure = self.offset_cet + self.departure_delta_with_point(point)
stop.delay_dep = 0.0
stop.platform = point.platform_string(self.up)
return stop
def arrival_delta_with_point(self, point):
if self.up:
return timedelta(minutes=point.upArrival)
else:
return timedelta(minutes=point.downArrival)
def departure_delta_with_point(self, point):
if self.up:
return timedelta(minutes=point.upDeparture)
else:
return timedelta(minutes=point.downDeparture)
def pattern_minutes_at_stop(self, stop):
dt = stop.departure - self.offset_cet
return dt.seconds // 60
# ------ Managing stops --------------------------------------------
@property
def stops(self):
if self._stops is None:
self._stops = []
return self._stops
@stops.setter
def stops(self, stops):
self._stops = stops
@property
def first_stop(self):
if self._stops:
return self.stops[0]
else:
return None
@property
def last_stop(self):
if self._stops:
return self.stops[-1]
else:
return None
@property
def est_arrival_cet(self):
est_arrival = self.last_stop.arrival
if not est_arrival:
est_arrival = self.last_stop.departure
return est_arrival + timedelta(minutes=self.last_stop.delay_dep)
@property
def destination(self):
if not self.last_stop:
return 'no stops'
last_station = self.last_stop.station.name
announced_destination = self.last_stop.destination
if last_station == announced_destination:
return last_station
else:
return '%s (%s)' % (last_station, announced_destination)
def awake_stops(self):
series = self.series
from_id, to_id = self.odIDs
all_stops = []
for point in series.points_in_range(from_id, to_id):
new_stop = self.create_stop_from_point(point)
all_stops.append(new_stop)
if all_stops:
all_stops[-1].status = StopStatuses.finalDestination
last_station = all_stops[-1].station
for stop in all_stops:
stop.destination = last_station.name
self.stops = all_stops
def update_stop(self, updated):
now = updated.now
if now is None:
now = now_cet()
status, delay = self.status_at_time(now)
if status == MissionStatuses.arrived:
logging.info('Update was ignored because mission has already arrived')
return
changes = False
small_changes = False
self.issue_time = now
index = self.index_for_stop(updated)
if index is not None:
existing = self.stops[index]
self.tasks = []
if existing.status != updated.status:
logging.info('Change status at %s from %s to %s.' % (existing.station_id,
StopStatuses.s[existing.status],
StopStatuses.s[updated.status]))
if updated.status == StopStatuses.revoked:
self.remove_stop(index)
existing = None
changes = True
else:
if existing.status == StopStatuses.planned and updated.status == StopStatuses.announced:
small_changes = True
elif existing.status == StopStatuses.altDestination and updated.status == StopStatuses.announced:
self.reset_destination()
changes = True
else:
if updated.status == StopStatuses.canceled:
self.check_for_canceled(index - 1)
self.check_for_canceled(index + 1)
elif existing.status == StopStatuses.canceled:
self.check_for_uncanceled(index - 1)
self.check_for_uncanceled(index + 1)
changes = True
existing.status = updated.status
if existing is not None:
if existing.delay_dep != updated.delay_dep:
logging.info('Change delay at %s from %.1f to %.1f.' %
(existing.station_id, existing.delay_dep, updated.delay_dep))
next_index = self.next_stop_index(now)
if index == next_index:
increasing = bool(existing.delay_dep < updated.delay_dep)
self.update_delay(index, updated.delay_dep, increasing)
self.schedule_more_updates(updated, now)
else:
if next_index is not None and existing.delay_dep == 0:
next_stop = self.stops[next_index]
self.issue_time += timedelta(seconds=config.INTERVAL_BETWEEN_UPDATE_MSG)
self.tasks.append(self.instruction_task(next_stop.station_url, 'prio', self.issue_time))
existing.delay_dep = updated.delay_dep
changes = True
if existing.platform != updated.platform and updated.platform is not None:
logging.info('Change platform at %s from %s to %s.' %
(existing.station_id, existing.platform, updated.platform))
existing.platform = updated.platform
changes = True
if existing.platformChange != updated.platformChange:
existing.platformChange = updated.platformChange
if existing.destination != updated.destination and updated.destination is not None:
logging.info('Change destination at %s from %s to %s.' %
(existing.station_id, existing.destination, updated.destination))
existing.destination = updated.destination
changes = True
self.update_destination(updated.destination)
if existing.alteredDestination != updated.alteredDestination:
logging.info('Change altered destination at %s from %s to %s.' %
(existing.station_id, existing.alteredDestination, updated.alteredDestination))
if updated.alteredDestination is None:
self.reset_destination()
else:
self.alter_destination(updated.alteredDestination)
existing.alteredDestination = updated.alteredDestination
changes = True
if existing.departure != updated.departure and updated.departure is not None:
logging.info('Change departure at %s from %s to %s.' %
(existing.station_id, existing.departure.strftime('%H:%M'), updated.departure.strftime('%H:%M')))
logging.info('%s ==> %s' % (existing.departure, updated.departure))
delta = updated.departure - existing.departure
existing.arrival += delta
existing.departure = updated.departure
changes = True
issue_tasks(self.tasks)
self.tasks = None
else:
if updated.status == StopStatuses.announced or updated.status == StopStatuses.extra:
self.anterior_stops(updated)
changes = True
if changes:
increase_counter('mission_changes')
self.put()
else:
if small_changes:
increase_counter('mission_small_changes')
self.cache_set()
else:
increase_counter('mission_no_changes')
def remove_stop(self, index):
if index == 0:
if len(self.stops) == 1 | |
# eeg_analyses/BDM.py
import os
import mne
import pickle
import random
import copy
import itertools
#import matplotlib
#matplotlib.use('agg') # now it works via ssh connection
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from support.FolderStructure import *
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from mne.decoding import (SlidingEstimator, GeneralizingEstimator,
cross_val_multiscore, LinearModel, get_coef)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from support.support import select_electrodes, trial_exclusion
from scipy.stats import rankdata
from IPython import embed
class BDM(FolderStructure):
def __init__(self, beh, EEG, to_decode, nr_folds, method = 'auc', elec_oi = 'all', downsample = 128, bdm_filter = None, baseline = None):
'''
Arguments
- - - - -
method (str): the method used to compute classifier performance. Available methods are:
acc - computes balanced accuracy (number of correct classifications per class,
% averaged over all classes)
auc (default) - computes Area Under the Curve
Returns
- - - -
'''
self.beh = beh
self.EEG = EEG.apply_baseline(baseline = baseline)
self.to_decode = to_decode
self.nr_folds = nr_folds
self.elec_oi = elec_oi
self.downsample = downsample
self.bdm_filter = bdm_filter
self.method = method
if bdm_filter != None:
self.bdm_type = list(bdm_filter.keys())[0]
self.bdm_band = bdm_filter[self.bdm_type]
else:
self.bdm_type = 'broad'
def selectBDMData(self, EEG, beh, time, excl_factor = None):
'''
Arguments
- - - - -
EEG (object):
beh (dataFrame):
time (tuple | list): time samples (start to end) for decoding
excl_factor (dict): see Classify documentation
Returns
- - - -
eegs (array): eeg data (trials X electrodes X time)
beh (dict): contains variables of interest for decoding
'''
# check whether trials need to be excluded
if type(excl_factor) == dict: # remove unwanted trials from beh
beh, EEG = trial_exclusion(beh, EEG, excl_factor)
# apply filtering and downsampling (if specified)
if self.bdm_type != 'broad':
EEG = EEG.filter(h_freq=self.bdm_band[0], l_freq=self.bdm_band[1],
method = 'iir', iir_params = dict(ftype = 'butterworth', order = 5))
if self.downsample != int(EEG.info['sfreq']):
print('downsampling data')
EEG.resample(self.downsample)
# select time window and EEG electrodes
s, e = [np.argmin(abs(EEG.times - t)) for t in time]
picks = mne.pick_types(EEG.info, eeg=True, exclude='bads')
picks = select_electrodes(np.array(EEG.ch_names)[picks], self.elec_oi)
eegs = EEG._data[:,picks,s:e]
times = EEG.times[s:e]
# if specified average over trials
#beh, eegs = self.averageTrials(beh, eegs, self.to_decode, cnd_header, 4)
# store dictionary with variables for plotting
plot_dict = {'ch_names': EEG.ch_names, 'times':times, 'info':EEG.info}
with open(self.FolderTracker(['bdm',self.to_decode], filename = 'plot_dict.pickle'),'wb') as handle:
pickle.dump(plot_dict, handle)
return eegs, beh, times
def averageTrials(self, X, Y, trial_avg):
# NOTE: a downside of only averaging at this stage (with the present method) is that leftover trials are lost
# initiate arrays
x_, y_ = [], []
# loop over each label in Y
for label in np.unique(Y):
idx = np.where(Y[0] == label)[0]
# note that trial order is shuffled
list_of_groups = zip(*(iter(idx),) * trial_avg)
for sub in list_of_groups:
x_.append(X[:,np.array(sub)].mean(axis = 1))
y_.append(label)
# create averaged x and y arrays
x_ = np.swapaxes(np.stack(x_),0,1)
y_ = np.array([y_,]*Y.shape[0])
return x_, y_
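# Illustrative sketch (hypothetical array shapes): trials sharing a label are grouped in
# chunks of `trial_avg` and averaged; leftover trials that do not fill a chunk are dropped.
#   Y = np.array([[0, 0, 0, 0, 1, 1, 1]])
#   X = np.random.rand(1, 7, 64, 100)            # assumed (folds, trials, electrodes, samples)
#   x_avg, y_avg = bdm.averageTrials(X, Y, 2)    # -> 3 averaged pseudo-trials (2x label 0, 1x label 1)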
def Classify(self,sj, cnds, cnd_header, time, collapse = False, bdm_labels = 'all', excl_factor = None, nr_perm = 0, gat_matrix = False, downscale = False, save = True):
'''
Arguments
- - - - -
sj(int): subject number
cnds (list): list of condition labels (as stored in beh dict).
cnd_header (str): variable name containing conditions of interest
bdm_labels (list | str): Specifies whether all labels or only a subset of labels should be decoded
excl_factor (dict | None): This gives the option to exclude specific conditions from analysis.
For example, to only include trials where the cue was pointed to the left and not to the right specify
the following: factor = {'cue_direc': ['right']}. Multiple column headers and multiple variables per
header can be specified
time (tuple | list): time samples (start to end) for decoding
collapse (boolean): If True also run analysis collapsed across all conditions
nr_perm (int): If perm = 0, run standard decoding analysis.
If perm > 0, the decoding is performed on permuted labels.
The number sets the number of permutations
gat_matrix (bool): If True, train X test decoding analysis is performed
downscale (bool): If True, decoding is repeated with increasingly fewer trials. Set to True if you are
interested in the minimum number of trials that support classification
save (bool): sets whether output is saved (via standard file organization) or returned
Returns
- - - -
'''
# first round of classification is always done on non-permuted labels
nr_perm += 1
# read in data
eegs, beh, times = self.selectBDMData(self.EEG, self.beh, time, excl_factor)
# select minimum number of trials given the specified conditions
max_tr = [self.selectMaxTrials(beh, cnds, bdm_labels,cnd_header)]
if downscale:
max_tr = [(i+1)*self.nr_folds for i in range(max_tr[0]//self.nr_folds)][::-1]
# create dictionary to save classification accuracy
classification = {'info': {'elec': self.elec_oi, 'times':times}}
if collapse:
beh['collapsed'] = 'no'
cnds += ['collapsed']
# loop over conditions
for cnd in cnds:
# reset selected trials
bdm_info = {}
if cnd != 'collapsed':
cnd_idx = np.where(beh[cnd_header] == cnd)[0]
if collapse:
beh['collapsed'][cnd_idx] = 'yes'
else:
# reset max_tr again such that analysis is not underpowered
max_tr = [self.selectMaxTrials(beh, ['yes'], bdm_labels,'collapsed')]
cnd_idx = np.where(np.sum(
[(beh[cnd_header] == c).values for c in cnds],
axis = 0))[0]
cnd_labels = beh[self.to_decode][cnd_idx].values
if bdm_labels != 'all':
sub_idx = [i for i,l in enumerate(cnd_labels) if l in bdm_labels]
cnd_idx = cnd_idx[sub_idx]
cnd_labels = cnd_labels[sub_idx]
labels = np.unique(cnd_labels)
print ('\nYou are decoding {}. The nr of trials used for folding is set to {}'.format(cnd, max_tr[0]))
# initiate decoding array
if gat_matrix:
class_acc = np.empty((nr_perm, eegs.shape[2], eegs.shape[2])) * np.nan
label_info = np.empty((nr_perm, eegs.shape[2], eegs.shape[2], labels.size)) * np.nan
else:
class_acc = np.empty((nr_perm, eegs.shape[2])) * np.nan
label_info = np.empty((nr_perm, eegs.shape[2], labels.size)) * np.nan
# permutation loop (if perm is 1, train labels are not shuffled)
for p in range(nr_perm):
if p > 0: # shuffle condition labels
np.random.shuffle(cnd_labels)
for i, n in enumerate(max_tr):
if i > 0:
print('Minimum condition label downsampled to {}'.format(n))
bdm_info = {}
# select train and test trials
train_tr, test_tr, bdm_info = self.trainTestSplit(cnd_idx, cnd_labels, n, bdm_info)
Xtr, Xte, Ytr, Yte = self.trainTestSelect(beh[self.to_decode], eegs, train_tr, test_tr)
# TRIAL AVERAGING NEEDS UPDATING
#self.trial_avg = 3
#if self.trial_avg > 1:
# Xtr, Ytr = self.averageTrials(Xtr, Ytr, 2)
# Xte, Yte = self.averageTrials(Xte, Yte, 2)
# do actual classification
#class_acc[p], label_info[p] = self.linearClassification(eegs, train_tr, test_tr, n, cnd_labels, gat_matrix)
class_acc[p], label_info[p] = self.crossTimeDecoding(Xtr, Xte, Ytr, Yte, labels, gat_matrix)
if i == 0:
classification.update({cnd:{'standard': copy.copy(class_acc[0])}, 'bdm_info': bdm_info})
else:
classification[cnd]['{}-nrlabels'.format(n)] = copy.copy(class_acc[0])
if nr_perm > 1:
classification[cnd].update({'perm': class_acc[1:]})
# store classification dict
if save:
with open(self.FolderTracker(['bdm',self.elec_oi, self.to_decode], filename = 'class_{}-{}.pickle'.format(sj,self.bdm_type)) ,'wb') as handle:
pickle.dump(classification, handle)
else:
return classification
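# Illustrative usage sketch (hypothetical behavior/EEG objects and condition names):
#   bdm = BDM(beh, epochs, to_decode='target_loc', nr_folds=10,
#             elec_oi='all', downsample=128, baseline=(-0.2, 0))
#   bdm.Classify(sj=1, cnds=['valid', 'invalid'], cnd_header='cue_validity',
#                time=(-0.2, 0.6), gat_matrix=False)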
def localizerClassify(self, sj, loc_beh, loc_eeg, cnds, cnd_header, time, tr_header, te_header, collapse = False, loc_excl = None, test_excl = None, gat_matrix = False, save = True):
"""Training and testing is done on seperate/independent data files
Arguments:
sj {int} -- Subject number
loc_beh {DataFrame} -- DataFrame that contains labels necessary for training the model
loc_eeg {object} -- EEG data used to train the model (MNE Epochs object)
cnds {list} -- List of conditions. Decoding is done for each condition separately
cnd_header {str} -- Name of column that contains condition info in test behavior file
time {tuple} -- Time window used for decoding
tr_header {str} -- Name of column that contains training labels
te_header {[type]} -- Name of column that contains testing labels
Keyword Arguments:
collapse {bool} -- If True also run analysis collapsed across all conditions
loc_excl {dict| None} -- Option to exclude trials from localizer. See Classify for more info (default: {None})
test_excl {[type]} -- Option to exclude trials from (test) analysis. See Classify for more info (default: {None})
gat_matrix {bool} -- If set to True, a generalization across time matrix is created (default: {False})
save {bool} -- Determines whether output is saved (via standard file organization) or returned (default: {True})
Returns:
classification {dict} -- Decoding output (for each condition separately)
"""
# set up localizer data
tr_eegs, tr_beh, times = self.selectBDMData(loc_eeg, loc_beh, time, loc_excl)
# set up test data
te_eegs, te_beh, times = self.selectBDMData(self.EEG, self.beh, time, test_excl)
# create dictionary to save classification accuracy
classification = {'info': {'elec': self.elec_oi, 'times':times}}
# specify training parameters (fixed for all testing conditions)
tr_labels = tr_beh[tr_header].values
min_nr_tr_labels = min(np.unique(tr_labels, return_counts = True)[1])
# make sure training is not biased towards a label
tr_idx = np.hstack([random.sample(np.where(tr_beh[tr_header] == label )[0],
k = min_nr_tr_labels) for label in np.unique(tr_labels)])
Ytr = tr_beh[tr_header][tr_idx].values.reshape(1,-1)
Xtr = tr_eegs[tr_idx,:,:][np.newaxis, ...]
if collapse:
cnds += ['collapsed']
# loop over all conditions
for cnd in cnds:
# set condition mask
if cnd != 'collapsed':
test_mask = (te_beh[cnd_header] == cnd).values
else:
test_mask = np.array(np.sum(
[(te_beh[cnd_header] == c).values for c in cnds],
axis = 0), dtype = bool)
# specify testing parameters
Yte = te_beh[te_header][test_mask].values.reshape(1,-1)
Xte = te_eegs[test_mask,:,:][np.newaxis, ...]
# do actual classification
class_acc, label_info = self.crossTimeDecoding(Xtr, Xte, Ytr, Yte, np.unique(Ytr), gat_matrix)
classification.update({cnd:{'standard': copy.copy(class_acc)}})
# store classification dict
if save:
with open(self.FolderTracker(['bdm',self.elec_oi, 'cross'], filename = 'class_{}-{}.pickle'.format(sj,te_header)) ,'wb') as handle:
pickle.dump(classification, handle)
return classification
def crossClassify(self, sj, cnds, cnd_header, time, tr_header, te_header, tr_te_rel = 'ind', excl_factor = None, tr_factor = None, te_factor = None, bdm_labels = 'all', gat_matrix = False, save = True, bdm_name = 'cross'):
'''
Cross-decoding; needs updating, but it does the trick.
'''
# read in data
print ('NR OF TRAIN LABELS DIFFER PER CONDITION!!!!')
print ('DOES NOT YET CONTAIN FACTOR SELECTION FOR DEPENDENT DATA')
eegs, beh, times = self.selectBDMData(self.EEG, self.beh, time, excl_factor)
nr_time = times.size
if | |
the estimation job.
Raises
------
KeyError
Non-unique unknowns are provided.
ValueError
Invalid unknowns shall be estimated.
ValueError
No bounds are provided for use of differential evolution optimizer.
TypeError
A list containing not only Measurement objects is provided.
Warns
-----
UserWarning
The `optimizer_kwargs` argument is not None.
"""
for _item in measurements:
if not isinstance(_item, Measurement):
raise TypeError('Must provide a list of Measurement objects')
_start = time.time()
if not Helpers.has_unique_ids(unknowns):
raise KeyError(Messages.bad_unknowns)
# test if parameters are estimated that are not known to the model
_valid_unknowns = self._get_valid_parameter_names()
for _unknown in unknowns:
if _unknown not in _valid_unknowns:
raise ValueError(f'Detected invalid unknown to be estimated: {_unknown} vs. {_valid_unknowns}')
if use_global_optimizer == False and not isinstance(unknowns, dict):
raise ValueError('Must provide initial guesses to use the local minimizer')
# sort unknowns and corresponding bounds alphabetically and case-insensitive
# Decide also whether to use the local or global minimizer
_unknowns_names_sorted = sorted(unknowns, key=str.lower)
if isinstance(unknowns, dict):
_unknowns = {_unknown_name : unknowns[_unknown_name] for _unknown_name in _unknowns_names_sorted}
elif isinstance(unknowns, list):
_unknowns = {_unknown_name : None for _unknown_name in _unknowns_names_sorted}
if use_global_optimizer is None:
use_global_optimizer = True
if use_global_optimizer and bounds is None:
raise ValueError(Messages.missing_bounds)
if bounds is not None:
try:
_bounds = [bounds[unknowns.index(_unknown_name)] for _unknown_name in _unknowns_names_sorted]
except AttributeError:
_bounds = [bounds[list(unknowns.keys()).index(_unknown_name)] for _unknown_name in _unknowns_names_sorted]
# To protect for integration error when a bound is integer 0
_bounds = Helpers.bounds_to_floats(_bounds)
else:
_bounds = None
# Check for optimizer_kwargs to be used
_optimizer_kwargs = {}
if self.optimizer_kwargs is None:
_warning_flag = False
else:
_optimizer_kwargs.update(self.optimizer_kwargs)
_warning_flag = True
# check if the keyword argument `optimizer_kwargs` was set, which has a higher priority over the corresponding Caretaker property
if optimizer_kwargs is not None:
_optimizer_kwargs = optimizer_kwargs
if _warning_flag:
warnings.warn('Using the `optimizer_kwargs` keyword argument overrides the Caretaker property `optimizer_kwargs`.', UserWarning)
if report_level >= 3:
verbosity_CVodeError = True
else:
verbosity_CVodeError = False
if report_level >= 4 and 'disp' not in _optimizer_kwargs.keys():
_optimizer_kwargs['disp'] = True
if use_global_optimizer:
minimizer_scope = 'differential evolution optimizer'
if 'popsize' not in _optimizer_kwargs.keys():
popsize = 5*len(_unknowns)
_optimizer_kwargs['popsize'] = popsize
opt = differential_evolution(self._loss_fun_scipy,
bounds=_bounds,
args=(_unknowns,
metric,
measurements,
handle_CVodeError,
verbosity_CVodeError,
),
**_optimizer_kwargs,
)
else:
minimizer_scope = 'local minimizer'
if 'disp' in _optimizer_kwargs.keys():
options = {'disp' : _optimizer_kwargs['disp']}
del _optimizer_kwargs['disp']
_optimizer_kwargs['options'] = options
opt = minimize(self._loss_fun_scipy,
list(_unknowns.values()),
args=(_unknowns,
metric,
measurements,
handle_CVodeError,
verbosity_CVodeError,
),
bounds=_bounds,
**_optimizer_kwargs,
)
# Preparing returns
estimations = {_unknown : value for _unknown, value in zip(_unknowns, opt.x)}
estimation_info = {}
estimation_info['opt_info'] = opt
if metric in list(PRETTY_METRICS.keys()):
estimation_info['metric'] = PRETTY_METRICS[metric]
else:
estimation_info['metric'] = metric
estimation_info['loss'] = opt.fun
_end = time.time()
estimation_info['runtime_min'] = (_end - _start)/60
if report_level >= 1:
print(f'\n----------Results from {minimizer_scope}')
print('\nEstimated parameters:')
for estimation in estimations.keys():
print(f'{estimation}: {estimations[estimation]}')
print(f'\nRuntime was {estimation_info["runtime_min"]:.2f} min')
if report_level >= 2:
print('\n----------')
print(opt)
return estimations, estimation_info
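# Illustrative usage sketch (hypothetical model, parameter names and data), using the
# keyword names seen in the method body above; `caretaker` stands for an instance of
# this class.
#   estimations, info = caretaker.estimate(
#       unknowns={'k1': 0.5, 'y0': 1.0},       # initial guesses -> local minimizer
#       measurements=measurements,             # list of Measurement objects
#       bounds=[(0.0, 10.0), (0.0, 5.0)],
#       use_global_optimizer=False,
#       metric='negLL',
#       report_level=1,
#   )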
def estimate_parallel(self,
unknowns:list, measurements:List[Measurement], bounds:List[Tuple],
metric:str='negLL', report_level:int=0,
optimizers:List[str]='de1220', optimizers_kwargs:List[dict]={}, log_each_nth_gen:int=None,
rel_pop_size:float=10.0, evolutions:int=5, archipelago_kwargs:dict={},
atol_islands:float=None, rtol_islands:float=1e-6,
max_runtime_min:float=None,
max_evotime_min:float=None,
max_memory_share:float=0.95,
handle_CVodeError:bool=True,
loss_calculator:LossCalculator=LossCalculator,
) -> Tuple[dict, ParallelEstimationInfo]:
"""
Estimates values for requested unknowns according to a specific metric, given some measurements,
using the generalized island model for parallelization that allows for global optimization.
This is provided by the pygmo package, which runs parallel evolutions of populations,
with migration of improved variants between the populations occuring.
For further info and use of pygmo, see https://github.com/esa/pygmo2, doi:10.5281/zenodo.3603747.
Arguments
---------
unknowns : dict or list
The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
measurements : List[Measurement]
The data from which the parameter estimation is performed.
Can provide a Measurement object for any model state or observation.
bounds : list of tuples
Bounds for for each unknown to be estimated, in the following form [(lower, upper), ...]
Keyword arguments
----------------
metric : str
The metric according to which the loss to be minimized is calculated.
Can be one of, e.g. `negLL` (negative log-likelihood), `SS` (sum of squares), or `WSS` (weighted sum of squares).
Default is `negLL`, which implies that the corresponding Measurement object are accordingly specified.
report_level : int
Enables informative output about the estimation process. Information will be printed after each evolution.
Default is 0, which is no output.
1 = Prints the best loss, as well as information about archipelago creation and evolution.
For each completed evolution, a dot is printed.
2 = Prints additionally the best loss after each evolution
3 = Prints additionally average loss among all islands, and the runtime of each evolution.
4 = Prints additionally the parameter values for the best loss, and the average parameter values
among the champions of all islands in the `archipelago` after the evolutions.
optimizers : List[str] or str
A list of names for the pygmo optimization algorithms of choice. For a list of such to be conveniently used,
see `PygmoOptimizers` class of this module.
In case a list with one item is used, this optimizer is used for all explicitly
or implicitly (default None of `n_islands`) defined number of islands.
In case a list with >1 optimizers is used, the corresponding number of islands will be created within the archipelago.
The currently supported list of optimizer can be found at pyfoomb.generalized_islands.PygmoOptimizers.optimizers
Default is `de1220`, which makes each island to use this algorithm.
optimizers_kwargs : List[dict] or dict
A list of optimizer_kwargs as dicts, corresponding to the list of optimizers.
In case more >1 optimizers are used, the 1-item list of optimizer_kwargs will be applied to all of the optimizers.
Default is `[{}]`, i.e. no additional optimizer kwargs.
log_each_nth_gen : int
Specifies at which each n-th generation the algorithm stores logs.
Can be later extracted from the returned `archipelago` instance.
Note that only the log from the last evolution is stored in the archipelago.
Default is None, which disables logging.
rel_pop_size : float
Determines the population size on each island, relative to the number of unknown to be estimated,
i.e. pop_size = rel_pop_size * len(unknowns), rounded to the next integer.
Default is 10, which creates population sizes 10 times the number of unknowns.
evolutions : int
Defines how often the populations on the islands are evolved.
Migrations between the populations of the islands occur after each finished evolution.
Migration depends on the topology of the archipelago, as well as the defined migration policies,
which are parts of `archipelago_kwargs`.
Default is 5, which triggers five rounds of evolution.
archipelago_kwargs : dict
The keyword arguments for instantiation of the archipelago.
In case `archipelago_kwargs` has no key `t`, the `pygmo.fully_connected()` topology will be used
Default is {}, i.e. an empty dictionary, which implies the use of `pygmo.fully_connected()` topology.
atol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is None, which implies no effect for this argument.
rtol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is 1e-6.
max_runtime_min : float
The maximum time in min the estimation process may take. The current runtime is evaluated after each completed evolution.
Default is None, which implies there is no maximum runtime for the estimation process.
max_evotime_min : float
The maximum cumulative pure evolution time the estimation process is allowed to take.
In contrast to the `max_runtime_min` stopping criterion, only the evolution runtime is considered,
without the runtime needed for checking stopping criteria, reporting print-outs between evolutions, etc.
Default is None.
max_memory_share : float
Defines the maximum share of memory that may be in use; once this share is exceeded, no further evolutions are run.
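Example
-------
The call below is only an illustrative sketch; the object and method names
(`caretaker.estimate_parallel`) are assumptions and are not defined by the
documentation above.

>>> estimates, est_info = caretaker.estimate_parallel(
...     unknowns=['k_1', 'k_2'],
...     measurements=measurements,
...     bounds=[(0.0, 10.0), (0.0, 5.0)],
...     metric='negLL',
...     optimizers='de1220',
...     evolutions=5,
...     report_level=1,
... )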
| |
from pdfminer.utils import PDFDocEncoding
from pdfminer.psparser import PSLiteral
from pdfminer.pdftypes import PDFObjRef
from decimal import Decimal, ROUND_HALF_UP
import numbers
from operator import itemgetter
import itertools
from functools import lru_cache as cache
DEFAULT_X_TOLERANCE = 3
DEFAULT_Y_TOLERANCE = 3
def cluster_list(xs, tolerance=0):
tolerance = decimalize(tolerance)
if tolerance == Decimal(0):
return [[x] for x in sorted(xs)]
if len(xs) < 2:
return [[x] for x in sorted(xs)]
groups = []
xs = list(sorted(xs))
current_group = [xs[0]]
last = xs[0]
for x in xs[1:]:
if x <= (last + tolerance):
current_group.append(x)
else:
groups.append(current_group)
current_group = [x]
last = x
groups.append(current_group)
return groups
def make_cluster_dict(values, tolerance):
tolerance = decimalize(tolerance)
clusters = cluster_list(set(values), tolerance)
nested_tuples = [
[(val, i) for val in value_cluster] for i, value_cluster in enumerate(clusters)
]
cluster_dict = dict(itertools.chain(*nested_tuples))
return cluster_dict
def cluster_objects(objs, attr, tolerance):
if isinstance(attr, (str, int)):
attr_getter = itemgetter(attr)
else:
attr_getter = attr
objs = to_list(objs)
values = map(attr_getter, objs)
cluster_dict = make_cluster_dict(values, tolerance)
get_0, get_1 = itemgetter(0), itemgetter(1)
cluster_tuples = sorted(
((obj, cluster_dict.get(attr_getter(obj))) for obj in objs), key=get_1
)
grouped = itertools.groupby(cluster_tuples, key=get_1)
clusters = [list(map(get_0, v)) for k, v in grouped]
return clusters
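# Illustrative sketch (not part of the original module): objects whose
# attribute values lie within `tolerance` of each other end up in the same
# cluster, and clusters are returned in ascending order of that attribute.
def _cluster_objects_demo():
    chars = [{"top": 10}, {"top": 11}, {"top": 30}]
    clusters = cluster_objects(chars, "top", tolerance=2)
    # clusters == [[{"top": 10}, {"top": 11}], [{"top": 30}]]
    return clusters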
def decode_text(s):
"""
Decodes a PDFDocEncoding string to Unicode.
Adds py3 compatibility to pdfminer's version.
"""
if type(s) == bytes and s.startswith(b"\xfe\xff"):
return str(s[2:], "utf-16be", "ignore")
else:
ords = (ord(c) if type(c) == str else c for c in s)
return "".join(PDFDocEncoding[o] for o in ords)
def decode_psl_list(_list):
return [
decode_text(value.name) if isinstance(value, PSLiteral) else value
for value in _list
]
def resolve(x):
if type(x) == PDFObjRef:
return x.resolve()
else:
return x
def get_dict_type(d):
if type(d) is not dict:
return None
t = d.get("Type")
if type(t) is PSLiteral:
return decode_text(t.name)
else:
return t
def resolve_all(x):
"""
Recursively resolves the given object and all the internals.
"""
t = type(x)
if t == PDFObjRef:
resolved = x.resolve()
# Avoid infinite recursion
if get_dict_type(resolved) == "Page":
return x
return resolve_all(resolved)
elif t in (list, tuple):
return t(resolve_all(v) for v in x)
elif t == dict:
if get_dict_type(x) == "Annot":
exceptions = ["Parent"]
else:
exceptions = []
return dict((k, v if k in exceptions else resolve_all(v)) for k, v in x.items())
else:
return x
@cache(maxsize=int(10e4))
def _decimalize(v, q=None):
# Convert int-like
if isinstance(v, numbers.Integral):
return Decimal(int(v))
# Convert float-like
elif isinstance(v, numbers.Real):
if q is not None:
return Decimal(repr(v)).quantize(Decimal(repr(q)), rounding=ROUND_HALF_UP)
else:
return Decimal(repr(v))
else:
raise ValueError(f"Cannot convert {v} to Decimal.")
def decimalize(v, q=None):
# If already a decimal, just return itself
if type(v) == Decimal:
return v
# If tuple/list passed, bulk-convert
if isinstance(v, (tuple, list)):
return type(v)(decimalize(x, q) for x in v)
else:
return _decimalize(v, q)
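# Illustrative sketch (not part of the original module): floats are converted
# via repr(), so 0.1 becomes Decimal("0.1") rather than a binary-float
# artifact, and the optional `q` quantizes the result.
def _decimalize_demo():
    assert decimalize(0.1) == Decimal("0.1")
    assert decimalize(3.14159, q=0.1) == Decimal("3.1")
    assert decimalize((1, 2.5)) == (Decimal(1), Decimal("2.5"))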
def is_dataframe(collection):
cls = collection.__class__
name = ".".join([cls.__module__, cls.__name__])
return name == "pandas.core.frame.DataFrame"
def to_list(collection):
if is_dataframe(collection):
return collection.to_dict("records") # pragma: nocover
else:
return list(collection)
def dedupe_chars(chars, tolerance=1):
"""
Removes duplicate chars — those sharing the same text, fontname, size,
and positioning (within `tolerance`) as other characters in the set.
"""
key = itemgetter("fontname", "size", "upright", "text")
pos_key = itemgetter("doctop", "x0")
t = decimalize(tolerance)
def yield_unique_chars(chars):
sorted_chars = sorted(chars, key=key)
for grp, grp_chars in itertools.groupby(sorted_chars, key=key):
for y_cluster in cluster_objects(grp_chars, "doctop", t):
for x_cluster in cluster_objects(y_cluster, "x0", t):
yield sorted(x_cluster, key=pos_key)[0]
deduped = yield_unique_chars(chars)
return sorted(deduped, key=chars.index)
def collate_line(line_chars, tolerance=DEFAULT_X_TOLERANCE):
tolerance = decimalize(tolerance)
coll = ""
last_x1 = None
for char in sorted(line_chars, key=itemgetter("x0")):
if (last_x1 is not None) and (char["x0"] > (last_x1 + tolerance)):
coll += " "
last_x1 = char["x1"]
coll += char["text"]
return coll
def objects_to_rect(objects):
return {
"x0": min(map(itemgetter("x0"), objects)),
"x1": max(map(itemgetter("x1"), objects)),
"top": min(map(itemgetter("top"), objects)),
"bottom": max(map(itemgetter("bottom"), objects)),
}
def objects_to_bbox(objects):
return (
min(map(itemgetter("x0"), objects)),
min(map(itemgetter("top"), objects)),
max(map(itemgetter("x1"), objects)),
max(map(itemgetter("bottom"), objects)),
)
obj_to_bbox = itemgetter("x0", "top", "x1", "bottom")
def bbox_to_rect(bbox):
return {"x0": bbox[0], "top": bbox[1], "x1": bbox[2], "bottom": bbox[3]}
DEFAULT_WORD_EXTRACTION_SETTINGS = dict(
x_tolerance=DEFAULT_X_TOLERANCE,
y_tolerance=DEFAULT_Y_TOLERANCE,
keep_blank_chars=False,
use_text_flow=False,
horizontal_ltr=True, # Should words be read left-to-right?
vertical_ttb=True, # Should vertical words be read top-to-bottom?
extra_attrs=[],
)
class WordExtractor:
def __init__(self, **settings):
for s, val in settings.items():
if s not in DEFAULT_WORD_EXTRACTION_SETTINGS:
raise ValueError(f"{s} is not a valid WordExtractor parameter")
if s in ["x_tolerance", "y_tolerance"]:
val = decimalize(val)
setattr(self, s, val)
def merge_chars(self, ordered_chars):
x0, top, x1, bottom = objects_to_bbox(ordered_chars)
upright = ordered_chars[0]["upright"]
direction = 1 if (self.horizontal_ltr if upright else self.vertical_ttb) else -1
word = {
"text": "".join(map(itemgetter("text"), ordered_chars)),
"x0": x0,
"x1": x1,
"top": top,
"bottom": bottom,
"upright": upright,
"direction": direction,
}
for key in self.extra_attrs:
word[key] = ordered_chars[0][key]
return word
def char_begins_new_word(self, current_chars, next_char):
upright = current_chars[0]["upright"]
intraline_tol = self.x_tolerance if upright else self.y_tolerance
interline_tol = self.y_tolerance if upright else self.x_tolerance
word_x0, word_top, word_x1, word_bottom = objects_to_bbox(current_chars)
return (
(next_char["x0"] > word_x1 + intraline_tol)
or (next_char["x1"] < word_x0 - intraline_tol)
or (next_char["top"] > word_bottom + interline_tol)
or (next_char["bottom"] < word_top - interline_tol)
)
def iter_chars_to_words(self, chars):
current_word = []
for char in chars:
if not self.keep_blank_chars and char["text"].isspace():
if current_word:
yield current_word
current_word = []
elif current_word and self.char_begins_new_word(current_word, char):
yield current_word
current_word = [char]
else:
current_word.append(char)
if current_word:
yield current_word
def iter_sort_chars(self, chars):
def upright_key(x):
return -int(x["upright"])
for upright_cluster in cluster_objects(chars, upright_key, 0):
upright = upright_cluster[0]["upright"]
cluster_key = "doctop" if upright else "x0"
# Cluster by line
subclusters = cluster_objects(
upright_cluster, cluster_key, self.y_tolerance
)
for sc in subclusters:
# Sort within line
sort_key = "x0" if upright else "doctop"
sc = sorted(sc, key=itemgetter(sort_key))
# Reverse order if necessary
if not (self.horizontal_ltr if upright else self.vertical_ttb):
sc = reversed(sc)
yield from sc
def iter_extract(self, chars):
if not self.use_text_flow:
chars = self.iter_sort_chars(chars)
grouping_key = itemgetter("upright", *self.extra_attrs)
grouped = itertools.groupby(chars, grouping_key)
for keyvals, char_group in grouped:
for word_chars in self.iter_chars_to_words(char_group):
yield self.merge_chars(word_chars)
def extract(self, chars):
return list(self.iter_extract(chars))
def extract_words(chars, **kwargs):
settings = dict(DEFAULT_WORD_EXTRACTION_SETTINGS)
settings.update(kwargs)
return WordExtractor(**settings).extract(chars)
def extract_text(
chars, x_tolerance=DEFAULT_X_TOLERANCE, y_tolerance=DEFAULT_Y_TOLERANCE
):
if len(chars) == 0:
return None
chars = to_list(chars)
doctop_clusters = cluster_objects(chars, "doctop", y_tolerance)
lines = (collate_line(line_chars, x_tolerance) for line_chars in doctop_clusters)
coll = "\n".join(lines)
return coll
collate_chars = extract_text
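# Illustrative sketch (not part of the original module): `chars` is assumed to
# be a list of pdfplumber-style character dicts (e.g. a page's `chars` list).
# Words are grouped with the default tolerances, and the text is reassembled
# line by line.
def _text_extraction_demo(chars):
    words = extract_words(chars, x_tolerance=3, y_tolerance=3)
    text = extract_text(chars)
    return words, text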
def filter_objects(objs, fn):
if isinstance(objs, dict):
return dict((k, filter_objects(v, fn)) for k, v in objs.items())
initial_type = type(objs)
objs = to_list(objs)
filtered = filter(fn, objs)
return initial_type(filtered)
def get_bbox_overlap(a, b):
a_left, a_top, a_right, a_bottom = decimalize(a)
b_left, b_top, b_right, b_bottom = decimalize(b)
o_left = max(a_left, b_left)
o_right = min(a_right, b_right)
o_bottom = min(a_bottom, b_bottom)
o_top = max(a_top, b_top)
o_width = o_right - o_left
o_height = o_bottom - o_top
if o_height >= 0 and o_width >= 0 and o_height + o_width > 0:
return (o_left, o_top, o_right, o_bottom)
else:
return None
def calculate_area(bbox):
left, top, right, bottom = bbox
if left > right or top > bottom:
raise ValueError(f"{bbox} has a negative width or height.")
return (right - left) * (bottom - top)
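# Illustrative sketch (not part of the original module): two bounding boxes
# given as (x0, top, x1, bottom) that overlap in a 5 x 5 square.
def _bbox_overlap_demo():
    a = (0, 0, 10, 10)
    b = (5, 5, 15, 15)
    overlap = get_bbox_overlap(a, b)  # -> (5, 5, 10, 10) as Decimals
    return calculate_area(overlap)    # -> Decimal(25)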
def clip_obj(obj, bbox):
bbox = decimalize(bbox)
overlap = get_bbox_overlap(obj_to_bbox(obj), bbox)
if overlap is None:
return None
dims = bbox_to_rect(overlap)
copy = dict(obj)
for attr in ["x0", "top", "x1", "bottom"]:
copy[attr] = dims[attr]
if dims["top"] != obj["bottom"] or dims["top"] != obj["bottom"]:
diff = dims["top"] - obj["top"]
copy["doctop"] = obj["doctop"] + diff
copy["width"] = copy["x1"] - copy["x0"]
copy["height"] = copy["bottom"] - copy["top"]
return copy
def intersects_bbox(objs, bbox):
"""
Filters objs to only those intersecting the bbox
"""
initial_type = type(objs)
objs = to_list(objs)
matching = [
obj for obj in objs if get_bbox_overlap(obj_to_bbox(obj), bbox) is not None
]
return initial_type(matching)
def within_bbox(objs, bbox):
"""
Filters objs to only those fully within the bbox
"""
if isinstance(objs, dict):
return dict((k, within_bbox(v, bbox)) for k, v in objs.items())
initial_type = type(objs)
objs = to_list(objs)
matching = [
obj
for obj in objs
if get_bbox_overlap(obj_to_bbox(obj), bbox) == obj_to_bbox(obj)
]
return initial_type(matching)
def crop_to_bbox(objs, bbox):
"""
Filters objs to only those intersecting the bbox,
and crops the extent of the objects to the bbox.
"""
if isinstance(objs, dict):
return dict((k, crop_to_bbox(v, bbox)) for k, v in objs.items())
initial_type = type(objs)
objs = to_list(objs)
cropped = list(filter(None, (clip_obj(obj, bbox) for obj in objs)))
return initial_type(cropped)
def move_object(obj, axis, value):
assert axis in ("h", "v")
if axis == "h":
new_items = (
("x0", obj["x0"] + value),
("x1", obj["x1"] + value),
)
if axis == "v":
new_items = [
("top", obj["top"] + value),
("bottom", obj["bottom"] + value),
]
if "doctop" | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import argparse
import io
import nltk
import pickle
import requests
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
from os.path import join as pjoin
import json
from itertools import izip
from copy import deepcopy as cp
np.random.seed(123)
_PAD = b"<pad>" # no need to pad
_UNK = b"<unk>"
_START_VOCAB = [_PAD, _UNK]
PAD_ID = 0
UNK_ID = 1
DISCOURSE_MARKERS = [
"after",
"also",
"although",
"and",
"as",
"because",
"before",
"but",
"for example",
"however",
"if",
"meanwhile",
"so",
"still",
"then",
"though",
"when",
"while"
]
DISCOURSE_MARKER_SET_TAG = "ALL18"
# patterns = {
# "because": ("IN", "mark", "advcl"),
# }
def setup_args():
parser = argparse.ArgumentParser()
code_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)))
glove_dir = os.path.join("data", "glove.6B")
parser.add_argument("--dataset", default="wikitext-103", type=str)
parser.add_argument("--train_size", default=0.9, type=float)
parser.add_argument("--glove_dir", default=glove_dir)
parser.add_argument("--method", default="string_ssplit_int_init", type=str)
parser.add_argument("--caching", action='store_true')
parser.add_argument("--action", default='collect_raw', type=str)
parser.add_argument("--glove_dim", default=300, type=int)
parser.add_argument("--random_init", action='store_true')
parser.add_argument("--max_seq_len", default=50, type=int)
parser.add_argument("--min_seq_len", default=5, type=int)
parser.add_argument("--max_ratio", default=5.0, type=float)
parser.add_argument("--undersamp_cutoff", default=0, type=int)
return parser.parse_args()
def initialize_vocabulary(vocabulary_path):
# map vocab to word embeddings
if os.path.isfile(vocabulary_path):
rev_vocab = []
with open(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip('\n') for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path)
def process_glove(args, vocab_dict, save_path, random_init=True):
"""
:param vocab_list: [vocab]
:return:
"""
if os.path.isfile(save_path + ".npz"):
print("Glove file already exists at %s" % (save_path + ".npz"))
else:
glove_path = os.path.join(args.glove_dir, "glove.840B.{}d.txt".format(args.glove_dim))
if random_init:
glove = np.random.randn(len(vocab_dict), args.glove_dim)
else:
glove = np.zeros((len(vocab_dict), args.glove_dim))
found = 0
with open(glove_path, 'r') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in vocab_dict: # all cased
idx = vocab_dict[word]
glove[idx, :] = np.fromstring(vec, sep=' ')
found += 1
# print("{}/{} of word vocab have corresponding vectors in {}".format(found, len(vocab), glove_path))
np.savez_compressed(save_path, glove=glove)
print("saved trimmed glove matrix at: {}".format(save_path))
def create_vocabulary(vocabulary_path, sentence_pairs_data, discourse_markers=None):
if os.path.isfile(vocabulary_path):
print("Vocabulary file already exists at %s" % vocabulary_path)
else:
print("Creating vocabulary {}".format(vocabulary_path))
vocab = {}
counter = 0
for s1, s2, label in sentence_pairs_data:
counter += 1
if counter % 100000 == 0:
print("processing line %d" % counter)
for w in s1:
if not w in _START_VOCAB:
if w in vocab:
vocab[w] += 1
else:
vocab[w] = 1
for w in s2:
if not w in _START_VOCAB:
if w in vocab:
vocab[w] += 1
else:
vocab[w] = 1
vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)
print("Vocabulary size: %d" % len(vocab_list))
with open(vocabulary_path, mode="wb") as vocab_file:
for w in vocab_list:
vocab_file.write(w + b"\n")
def sentence_to_token_ids(sentence, vocabulary):
return [vocabulary.get(w, UNK_ID) for w in sentence]
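def _token_ids_demo():
    # Illustrative sketch (not part of the original script): words missing
    # from the vocabulary map to UNK_ID, everything else to its index.
    vocab = {"because": 2, "it": 3, "rained": 4}
    ids = sentence_to_token_ids(["because", "it", "rained", "heavily"], vocab)
    # ids == [2, 3, 4, UNK_ID] == [2, 3, 4, 1]
    return ids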
def merge_dict(dict_list1, dict_list2):
for key, list_sent in dict_list1.iteritems():
dict_list1[key].extend(dict_list2[key])
return dict_list1
def data_to_token_ids(data, rev_class_labels, class_label_dict, target_path, vocabulary_path, data_dir):
if os.path.isfile(target_path):
print("file {} already exists".format(target_path))
else:
vocab, _ = initialize_vocabulary(vocabulary_path)
ids_data = []
text_data = []
counter = 0
for s1, s2, text_label in data:
label = class_label_dict[text_label]
counter += 1
if counter % 1000000 == 0:
print("converting %d" % (counter))
token_ids_s1 = sentence_to_token_ids(s1, vocab)
token_ids_s2 = sentence_to_token_ids(s2, vocab)
ids_data.append((token_ids_s1, token_ids_s2, label))
# shuffled_idx = range(len(ids_data))
# np.random.shuffle(shuffled_idx)
# shuffled_ids_data = [ids_data[idx] for idx in shuffled_idx]
# shuffled_text_data = [text_data[idx] for idx in shuffled_idx]
print("writing {}".format(target_path))
pickle.dump(ids_data, open(target_path, mode="wb"))
def undo_rephrase(lst):
return " ".join(lst).replace("for_example", "for example").split()
def rephrase(str):
return str.replace("for example", "for_example")
def string_ssplit_int_init(sentence, previous_sentence, marker):
if marker=="for example":
words = rephrase(sentence).split()
if "for_example"==words[0].lower():
s1 = previous_sentence
s2 = " ".join(undo_rephrase(words[1:]))
else:
idx = [w.lower() for w in words].index("for_example")
s1 = " ".join(undo_rephrase(words[:idx]))
s2 = " ".join(undo_rephrase(words[idx+1:]))
else:
words = sentence.split()
if marker==words[0].lower(): # sentence-initial
s1 = previous_sentence
s2 = " ".join(words[1:])
else: # sentence-internal
idx = [w.lower() for w in words].index(marker)
s1 = " ".join(words[:idx])
s2 = " ".join(words[idx+1:])
return (s1.strip(), s2.strip(), marker)
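def _ssplit_demo():
    # Illustrative sketch (not part of the original script): a
    # sentence-internal marker yields (S1, S2, marker); a sentence-initial
    # marker would instead pair S2 with the previous sentence.
    pair = string_ssplit_int_init(
        "I stayed home because it was raining.", "It was late.", "because")
    # pair == ("I stayed home", "it was raining.", "because")
    return pair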
def string_ssplit_clean_markers():
raise Exception("haven't included clean ssplit in this script yet")
def depparse_ssplit_v1():
raise Exception("haven't included old combination depparse ssplit in this script yet")
def depparse_ssplit_v2(sentence, previous_sentence, marker):
dangerous_dependencies = ["mark", "advcl", "acl"]
dependency_patterns = {
"after": {
"POS": "IN",
"S2": "mark", # S2 head (full S head) ---> connective
"S1": ["advcl", "acl"]
},
"also": {
"POS": "RB",
"S2": "advmod",
"S1": ["advcl"]
},
"although": {
"POS": "IN",
"S2": "mark",
"S1": ["advcl"]
},
"and": {
"POS": "CC",
"S2": "cc",
"S1": ["conj"]
},
"before": {
"POS": "IN",
"S2": "mark",
"S1": ["advcl"]
},
"so": {
"POS": "IN",
"S2": "mark",
"S1": ["advcl"]
},
"still": {
"POS": "RB",
"S2": "advmod",
"S1": ["parataxis", "dep"]
},
"though": {
"POS": "IN",
"S2": "mark",
"S1": ["advcl"]
},
"because": {
"POS": "IN",
"S2": "mark",
"S1": ["advcl"]
},
"however": {
"POS": "RB",
"S2": "advmod",
"S1": ["dep", "parataxis"]
},
"if": {
"POS": "IN",
"S2": "mark",
"S1": ["advcl"]
},
"while": {
"POS": "IN",
"S2": "mark",
"S1": ["advcl"]
}
}
# search for pattern:
# "[discourse marker] S2, S1" (needs dependency parse)
def search_for_reverse_pattern_pair(sent, marker, words, previous_sentence):
parse_string = get_parse(sent, depparse=True)
# book corpus maybe has carriage returns and new lines and other things?
try:
parse = json.loads(parse_string.replace('\r\n', ''))
except ValueError:
parse = json.loads(re.sub("[^A-z0-9.,!:?\"'*&/\{\}\[\]()=+-]", "", parse_string))
sentence = Sentence(parse["sentences"][0], sent)
return sentence.find_pair(marker, "s2 discourse_marker s1", previous_sentence)
def is_verb_tag(tag):
return tag[0] == "V" and not tag[-2:] in ["BG", "BN"]
"""
POS-tag string as if it's a sentence
and see if it has a verb that could plausibly be the predicate.
"""
def has_verb(string):
parse = get_parse(string, depparse=False)
tokens = json.loads(parse)["sentences"][0]["tokens"]
return any([is_verb_tag(t["pos"]) for t in tokens])
"""
using the depparse, look for the desired pattern, in any order
"""
def search_for_dep_pattern(marker, current_sentence, previous_sentence):
parse_string = get_parse(current_sentence, depparse=True)
# book corpus maybe has carriage returns and new lines and other things?
try:
parse = json.loads(parse_string.replace('\r\n', ''))
except ValueError:
parse = json.loads(re.sub("[^A-z0-9.,!?:\"'*&/\{\}\[\]()=+-]", "", parse_string))
sentence = Sentence(parse["sentences"][0], current_sentence)
return sentence.find_pair(marker, "any", previous_sentence)
# https://stackoverflow.com/a/18669080
def get_indices(lst, element, case="sensitive"):
result = []
starting_index = -1
while True:
try:
found_index = lst.index(element, starting_index+1)
starting_index = found_index
except ValueError:
return result
result.append(found_index)
def get_nearest(lst, element):
distances = [abs(e-element) for e in lst]
return lst[np.argmin(distances)]
def separate_at_signs(lst):
s = " ".join(lst)
separated_s = re.sub(r" @([^ ]+)@ ", r" @ \1 @ ", s)
return separated_s.split()
"""
parsed tokenization is different from original tokenization.
try to re-align and extract the correct words given the
extraction_indices (which are 1-indexed into parsed_words)
fix me to catch more cases?
"""
def extract_subphrase(orig_words, parsed_words, extraction_indices):
extraction_indices = [i-1 for i in extraction_indices]
orig_words = separate_at_signs(orig_words)
if len(orig_words) == len(parsed_words):
return " ".join([orig_words[i] for i in extraction_indices])
else:
first_parse_index = extraction_indices[0]
first_word_indices = get_indices(orig_words, parsed_words[first_parse_index])
last_parse_index = extraction_indices[-1]
last_word_indices = get_indices(orig_words, parsed_words[last_parse_index])
if len(first_word_indices)>0 and len(last_word_indices)>0:
first_orig_index = get_nearest(first_word_indices, first_parse_index)
last_orig_index = get_nearest(last_word_indices, last_parse_index)
if last_orig_index-first_orig_index == last_parse_index-first_parse_index:
# maybe it's just shifted
shift = first_orig_index - first_parse_index
extraction_indices = [i+shift for i in extraction_indices]
return " ".join([orig_words[i] for i in extraction_indices])
else:
# or maybe there's funny stuff happening inside the subphrase
# in which case T-T
return None
else:
if len(first_word_indices)>0 and abs(last_parse_index-len(parsed_words))<3:
# the end of the sentence is always weird. assume it's aligned
# grab the start of the subphrase
first_orig_index = get_nearest(first_word_indices, first_parse_index)
# shift if necessary
shift = first_orig_index - first_parse_index
extraction_indices = [i+shift for i in extraction_indices]
if len(orig_words) > extraction_indices[-1]:
# extend to the end of the sentence if we're not already there
extraction_indices += range(extraction_indices[-1]+1, len(orig_words))
else:
extraction_indices = [i for i in extraction_indices if i<len(orig_words)]
return " ".join([orig_words[i] for i in extraction_indices])
else:
# or maybe the first and/or last words have been transformed,
# in which case T-T
return None
"""
use corenlp server (see https://github.com/erindb/corenlp-ec2-startup)
to parse sentences: tokens, dependency parse
"""
def get_parse(sentence, depparse=True):
sentence = sentence.replace("'t ", " 't ")
if depparse:
url = "http://localhost:12345?properties={annotators:'tokenize,ssplit,pos,depparse'}"
else:
url = "http://localhost:12345?properties={annotators:'tokenize,ssplit,pos'}"
data = sentence
parse_string = requests.post(url, data=data).text
return json.loads(parse_string)["sentences"][0]
class Sentence():
def __init__(self, json_sentence, original_sentence):
self.json = json_sentence
self.dependencies = json_sentence["basicDependencies"]
self.tokens = json_sentence["tokens"]
self.original_sentence = original_sentence
def indices(self, word):
if len(word.split(" ")) > 1:
words = word.split(" ")
indices = [i for lst in [self.indices(w) for w in words] for i in lst]
return indices
else:
return [i+1 for i in get_indices([t["word"].lower() for t in self.tokens], word)]
def token(self, index):
return self.tokens[int(index)-1]
def word(self, index):
return self.token(index)["word"]
def find_parents(self, index, | |
memory_fill_only = False):
""" parallelized implementation of single iteration"""
initial_qv_weights = None
qv_update_launched = False
total_log = None
total_runs = tot_successful_runs = 0
task_lst = [None for _ in self.sim_agents_discr]
iter_traj_stats = []
while True:
self.display_progress()
# 1) start qv update based on existing memory stored
if not memory_fill_only:
if ray.get(self.nn_updater.hasMemoryPool.remote()) and not qv_update_launched:
# this check is required to ensure nn_updater has finished loading the memory
initial_qv_weights = deepcopy(ray.get(self.nn_updater.getAttributeValue.remote('model_qv')).cpu().state_dict())
updating_process = self.nn_updater.update_DQN_asynch.remote()
qv_update_launched = True
# 2) start parallel simulations
for i, agent in enumerate(self.sim_agents_discr):
# 2.1) get info from complete processes
try:
agent_running = ray.get(agent.isRunning.remote(), timeout = 1)
except Exception:
agent_running = False
if not agent_running :
if not task_lst[i] in [None, 'deactivated']:
try:
partial_log, single_runs, successful_runs,traj_stats,internal_memory_fill_ratio , pg_info = ray.get(task_lst[i], timeout = 1)
# partial_log contains 1) duration and 2) cumulative reward of every single-run
task_lst[i] = None
if total_log is None:
total_log = partial_log
else:
total_log = np.append(total_log, partial_log, axis = 0)
total_runs += single_runs
self.shared_memory.addMemoryBatch(ray.get(agent.emptyLocalMemory.remote()) )
self.display_progress()
if validate_DQL:
iter_traj_stats.extend(traj_stats)
except Exception:
print('Killing Actor and generating new one')
task_lst[i] = 'deactivated'
ray.kill(agent)
self.sim_agents_discr[i] = self.createRayActor_Agent(i)
self.sim_agents_discr[i].setAttribute.remote('model_qv', self.model_qv)
agent = self.sim_agents_discr[i]
# 2.1.1) run another task if required
expected_upcoming_memory_ratio = sum(not tsk in [None, 'deactivated'] for tsk in task_lst)*self.tot_iterations/self.memory_turnover_size
min_fill = 0.95-np.clip(expected_upcoming_memory_ratio,0,1)
if not self.shared_memory.isPartiallyFull(min_fill):
if self.continuous_qv_update and qv_update_launched:
model_qv_weights = ray.get(self.nn_updater.get_current_QV_model.remote()).cpu().state_dict()
agent.update_model_weights_only.remote(model_qv_weights, 'model_qv')
if task_lst[i] is None:
task_lst[i] = agent.run.remote(use_NN = validate_DQL)
# if memory is (almost) full and task list empty, inform qv update it can stop
if self.shared_memory.isFull() or all(tsk in [None, 'deactivated'] for tsk in task_lst) :
if qv_update_launched:
self.nn_updater.sentinel.remote()
# after iteration extract data from remote QV update process
self.qval_loss_hist_iteration, self.av_map_loss , self.n_epochs = np.array(ray.get(updating_process))
break
self.clear_task_lists(task_lst)
if validate_DQL:
self.traj_stats.append(iter_traj_stats)
self.end_validation_routine(total_runs, tot_successful_runs, total_log)
if not memory_fill_only:
self.post_iteration_routine(initial_qv_weights = initial_qv_weights, total_runs= total_runs, total_log= total_log)
##################################################################################
def post_iteration_routine(self, initial_qv_weights = None, initial_pg_weights = None, initial_v_weights= None, \
total_runs= None, total_log= None, temp_pg_loss= None, temp_entropy= None, \
temp_advantage = None, temp_map_loss = None):
""" store data, display intermediate results """
average_qval_loss = policy_loss = pg_entropy = map_loss = np.nan
if initial_qv_weights is not None:
# 1) compute qv average loss
average_qval_loss = np.round(np.average(self.qval_loss_hist_iteration), 3)
print(f'QV loss = {average_qval_loss}')
if self.av_map_loss is not None:
map_loss = self.av_map_loss
print(f'average map loss = {map_loss}')
# 2) show model differences
if self.ray_parallelize:
current_qv_weights = deepcopy(ray.get(self.nn_updater.get_current_QV_model.remote())).cpu().state_dict()
else:
current_qv_weights = deepcopy(self.nn_updater.model_qv).cpu().state_dict()
model_qv_diff = self.model_qv.compare_weights(initial_qv_weights , current_qv_weights )
print(f'model QV update : {np.round(model_qv_diff,5)}')
if initial_pg_weights is not None:
model_diff_pg = np.round(self.model_pg.compare_weights(initial_pg_weights), 5)
print(f'PG model change: {model_diff_pg}')
if self.rl_mode == 'AC':
model_diff_v = np.round(self.model_v.compare_weights(initial_v_weights), 5)
print(f'V model change: {model_diff_v}')
adv_loss = np.round(sum(temp_advantage)/(1e-5+len(temp_advantage)), 3)
average_qval_loss = adv_loss
policy_loss = np.round(sum(temp_pg_loss)/(1e-5+len(temp_pg_loss)), 3)
pg_entropy = np.round(sum(temp_entropy)/(1e-5+len(temp_entropy)), 3)
print('')
print(f'average policy loss : {policy_loss}')
print(f'average pg entropy : {np.round(100*pg_entropy/self.max_entropy, 2)}%')
if self.map_output:
map_loss = np.round(sum(temp_map_loss)/(1e-5+len(temp_map_loss)), 3)
print(f'average map-output loss : {map_loss}')
if self.rl_mode == 'AC':
print(f'average adv loss : {adv_loss}')
print('')
print(f'average cum-reward : {np.round(np.average(total_log[:,1]),5)}')
#print('total log:')
#print(total_log[:,1])
#3) store computed losses
self.get_log(total_runs, total_log, qval_loss = average_qval_loss, policy_loss = policy_loss , pg_entropy = pg_entropy, map_loss = map_loss)
##################################################################################
def get_log(self, total_runs, total_log, **kwargs):
#total log includes (for all runs): [0] steps since start [1] cumulative reward
for k,v in kwargs.items():
self.session_log[k] = np.round( v , 3)
# total runs, steps since start, cumulative reward, average loss / total_successful_runs, entropy (pg training only)
self.session_log['total runs'] = int(total_runs)
self.session_log['steps since start'] = np.round(np.average(total_log[:,0]),2)
self.session_log['cumulative reward'] = np.round(np.average(total_log[:,1]),2)
##################################################################################
def validation_serialized(self):
total_log = np.zeros((1,2), dtype = float)
total_runs = 0
tot_successful_runs = 0
iter_traj_stats = []
while not self.shared_memory.isFull():
self.display_progress()
partial_log, single_runs, successful_runs, traj_stats , _, _ = self.sim_agents_discr[0].run_synch(use_NN = True)
iter_traj_stats.extend(traj_stats)
# partial log: steps_since_start, cum_reward
total_log = np.append(total_log, partial_log, axis = 0)
total_runs += single_runs
tot_successful_runs += successful_runs
if not self.shared_memory.isFull():
self.shared_memory.addMemoryBatch(self.sim_agents_discr[0].emptyLocalMemory() )
self.traj_stats.append(iter_traj_stats)
print('')
print('')
self.end_validation_routine(total_runs,tot_successful_runs, total_log)
##################################################################################
def validation_parallelized(self):
total_log = np.zeros((1,2), dtype = float)
min_fill_ratio = 0.85
total_runs = tot_successful_runs = 0
reload_val = False
agents_lst = []
failures = len(self.sim_agents_discr)*[None]
[agents_lst.append(agent.run.remote(use_NN = True)) for i, agent in enumerate(self.sim_agents_discr)]
start_time = time.time()
iter_traj_stats = []
while True:
task_ready, task_not_ready = ray.wait([i for i in agents_lst if i], num_returns=1, timeout= 1)
if task_ready:
idx_ready = agents_lst.index(task_ready[0])
try:
partial_log, single_runs, successful_runs, traj_stats, _ , _ = ray.get(agents_lst[idx_ready], timeout = 5)
iter_traj_stats.extend(traj_stats)
total_log = np.append(total_log, partial_log, axis = 0)
total_runs += single_runs
tot_successful_runs += successful_runs
self.shared_memory.addMemoryBatch(ray.get(self.sim_agents_discr[idx_ready].emptyLocalMemory.remote(), timeout = 5) )
self.display_progress()
agents_lst[idx_ready] = None
failures[idx_ready] = None
except Exception:
failures[idx_ready] = True
print('memory download fail!')
if not self.shared_memory.isPartiallyFull(min_fill_ratio):
agents_lst[idx_ready] = self.sim_agents_discr[idx_ready].run.remote(use_NN = True)
if all([i is None for i in agents_lst]) or \
all([ failures[i] for i,ag in enumerate(agents_lst) if ag is not None ]) or \
time.time() - start_time > 600:
break
self.traj_stats.append(iter_traj_stats)
print('')
self.clear_task_lists(agents_lst)
self.end_validation_routine(total_runs, tot_successful_runs, total_log)
##################################################################################
def end_validation_routine(self, total_runs, tot_successful_runs, total_log):
"""
new_memory = self.shared_memory.getMemoryAndEmpty()
#turnover_pctg_act = np.round(100*self.memory_stored.addMemoryBatch(new_memory),2)
if self.ray_parallelize :
self.nn_updater.update_memory_pool.remote(new_memory)
else:
self.nn_updater.update_memory_pool(new_memory)
"""
#total log includes (for all runs): 1) steps since start 2) cumulative reward
self.get_log(total_runs, total_log)
if total_runs > 0 :
success_ratio = np.round(tot_successful_runs/total_runs ,2)
else:
success_ratio = -1
val_history = np.array([self.training_session_number, success_ratio, self.session_log['total runs'], \
self.session_log['steps since start'], self.session_log['cumulative reward'] ])
# 0) iteration ---- 1)success_ratio 2) total runs 3) average duration 4)average single run reward
if self.val_history is None:
self.val_history = val_history[np.newaxis,:].copy()
else:
self.val_history = np.append(self.val_history, val_history[np.newaxis,:], axis = 0)
"""
if self.bashplot and self.val_history.shape[0] > 5:
np.savetxt("val_hist.csv", self.val_history[:,[0,3]], delimiter=",")
plot_scatter(f = "val_hist.csv",xs = None, ys = None, size = 20, colour = 'yellow',pch = '*', title = 'validation history')
"""
self.saveValIteration()
print(f'training iteration = {val_history[0]}')
#print(f'test samples = {round(self.shared_memory.fill_ratio*self.shared_memory.size)}')
print(f'total single runs = {val_history[2]}')
print(f'success ratio = {val_history[1]}')
print(f'average steps since start = {val_history[3]}')
print(f'average cumulative reward = {val_history[4]}')
##################################################################################
def print_NN_parameters_count(self):
print('')
print('')
if self.rl_mode == 'AC':
actor_n_params = self.model_pg.count_NN_params()
critic_n_params = self.model_v.count_NN_params()
print('total NN trainable parameters:')
print(f'actor :{actor_n_params}')
print(f'critic :{critic_n_params}')
elif self.rl_mode == 'parallelAC':
actor_n_params = self.model_pg.count_NN_params()
QV_critic_n_params = self.model_qv.count_NN_params()
print('total NN trainable parameters:')
print(f'actor :{actor_n_params}')
print(f'critic (QV):{QV_critic_n_params}')
elif self.rl_mode == 'DQL':
DQL_n_params = self.model_qv.count_NN_params()
print('total NN trainable parameters:')
print(f'DQL :{DQL_n_params}')
print('')
print('')
##################################################################################
def runSequence(self, n_runs = 5, display_graphs = False, reset_optimizer = False):
self.print_NN_parameters_count()
final_run = self.training_session_number+ n_runs
for i in np.arange(self.training_session_number,final_run+1):
print(f'simulating with {self.n_agents_discr} agents')
validate_DQL = (not i % self.val_frequency) and self.rl_mode == 'DQL'
self.runEnvIterationOnce(validate_DQL, reset_optimizer)
print('###################################################################')
print('###################################################################')
print(f'Model Saved in {self.storage_path}')
print(f'end of iteration: {self.training_session_number} of {final_run+1}')
print('###################################################################')
print('###################################################################')
if reset_optimizer:
reset_optimizer = False
if display_graphs:
self.plot_training_log()
"""
if not i % self.val_frequency and self.rl_mode == 'DQL':
print('###################################################################')
print('validation cycle start...')
self.shared_memory.resetMemory(self.validation_set_size)
if self.update_model_cpu():
self.updateAgentsAttributesExcept(print_attribute=['env','env','model','model_version'])
else:
raise('Loading error')
if self.ray_parallelize:
self.validation_parallelized()
else:
self.validation_serialized()
print('end of validation cycle')
print('###################################################################')
"""
##################################################################################
def updateProbabilities(self, print_out = False):
self.updateEpsilon()
self.updateCtrlr_probability()
split_conventional_ctrl = np.round(100*self.ctrlr_probability,2)
split_random = np.round(100*self.epsilon*(1-self.ctrlr_probability),2)
split_NN_ctrl = np.round(100*(1-self.epsilon)*(1-self.ctrlr_probability),2)
self.splits = np.array([split_random, split_conventional_ctrl, split_NN_ctrl])
if print_out:
#print(f'y = {self.epsilon}')
#print(f'c = {self.ctrlr_probability}')
print(f'Random share: {split_random}%')
print(f'Conventional ctl share: {split_conventional_ctrl}%')
print(f'NN share: {split_NN_ctrl}%')
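        # Worked example (illustrative, values assumed): with epsilon = 0.2 and
        # ctrlr_probability = 0.3, the action-source split is 30% conventional
        # controller, 0.2 * (1 - 0.3) = 14% random, and
        # (1 - 0.2) * (1 - 0.3) = 56% NN-driven, summing to 100%.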
##################################################################################
def pre_training_routine(self, reset_optimizer = False, first_pg_training = False):
# it only depends on the memory size
# shared memory is the one filled by the agents, to be poured later in "stored memory".
#it's resetted at every new cycle.
#after the first fill, only the portion defined by "memory_turnover_size" is filled
self.shared_memory.resetMemory(bool(self.memory_stored)*self.memory_turnover_size)
self.updateProbabilities(print_out = (self.rl_mode == 'DQL') )
# update epsilon and model of the Sim agents
if self.training_session_number == 0 or first_pg_training:
self.check_model_version(print_out = True, mode = 'sim', save_v0 = True)
#self.update_model_cpu() returns True if successful
if self.update_model_cpu():
self.updateAgentsAttributesExcept(print_attribute=['env','env','model','model_version'])
else:
raise RuntimeError('Loading error')
| |
# Source repository: chellebodnar-google/public-datasets-pipelines
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import random
import re
import shutil
import subprocess
import tempfile
import pytest
from ruamel import yaml
from scripts import generate_terraform
PROJECT_ROOT = generate_terraform.PROJECT_ROOT
FILE_PATHS = {
"dataset": PROJECT_ROOT / "samples" / "dataset.yaml",
"pipeline": PROJECT_ROOT / "samples" / "pipeline.yaml",
"license": PROJECT_ROOT / "templates" / "airflow" / "license_header.py.jinja2",
}
ENV_PATH = PROJECT_ROOT / ".test"
ENV_DATASETS_PATH = ENV_PATH / "datasets"
yaml = yaml.YAML(typ="safe")
@pytest.fixture
def dataset_path():
with tempfile.TemporaryDirectory(
dir=generate_terraform.DATASETS_PATH, suffix="_dataset"
) as dir_path:
try:
yield pathlib.Path(dir_path)
finally:
shutil.rmtree(dir_path, ignore_errors=True)
@pytest.fixture
def pipeline_path(dataset_path, suffix="_pipeline"):
pipelines_dir = dataset_path / "pipelines"
pipelines_dir.mkdir(parents=True, exist_ok=True)
with tempfile.TemporaryDirectory(dir=pipelines_dir, suffix=suffix) as dir_path:
try:
yield pathlib.Path(dir_path)
finally:
shutil.rmtree(dir_path)
@pytest.fixture
def project_id() -> str:
return "test-gcp-project-id"
@pytest.fixture
def bucket_name_prefix() -> str:
return "1234-zyxwvu"
@pytest.fixture
def region() -> str:
return "us-east4"
@pytest.fixture
def impersonating_acct() -> str:
return "<EMAIL>"
@pytest.fixture
def gcs_bucket_resource() -> dict:
return {
"type": "storage_bucket",
"name": "{{ friendly_project_id }}.{{ dataset_id }}",
}
@pytest.fixture
def bq_table_resource() -> dict:
return {
"type": "bigquery_table",
"table_id": "test_bq_table",
"schema": [
{"name": "test_col_string", "type": "STRING"},
{"name": "test_col_int", "type": "INT64"},
{"name": "test_col_numeric", "type": "NUMERIC"},
{"name": "test_col_datetime", "type": "DATETIME"},
{"name": "test_col_struct", "type": "STRUCT"},
],
}
@pytest.fixture
def tf_state_bucket() -> str:
return "test-terraform-state-bucket"
@pytest.fixture
def tf_state_prefix() -> str:
return "test/terraform/state"
@pytest.fixture
def env() -> str:
return "test"
def set_dataset_ids_in_config_files(
dataset_path: pathlib.Path, pipeline_path: pathlib.Path
):
shutil.copyfile(FILE_PATHS["dataset"], dataset_path / "pipelines" / "dataset.yaml")
shutil.copyfile(FILE_PATHS["pipeline"], pipeline_path / "pipeline.yaml")
dataset_config = yaml.load(dataset_path / "pipelines" / "dataset.yaml")
dataset_config["dataset"]["name"] = dataset_path.name
for resource in dataset_config["resources"]:
if resource["type"] == "bigquery_dataset":
resource["dataset_id"] = dataset_path.name
yaml.dump(dataset_config, dataset_path / "pipelines" / "dataset.yaml")
pipeline_config = yaml.load(pipeline_path / "pipeline.yaml")
for resource in pipeline_config["resources"]:
if resource["type"] == "bigquery_table":
resource["dataset_id"] = dataset_path.name
yaml.dump(pipeline_config, pipeline_path / "pipeline.yaml")
def test_tf_templates_exist():
for _, filepath in generate_terraform.TEMPLATE_PATHS.items():
assert filepath.exists()
def test_main_generates_tf_files(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / "provider.tf").exists()
assert (path_prefix / f"{dataset_path.name}_dataset.tf").exists()
assert (path_prefix / f"{pipeline_path.name}_pipeline.tf").exists()
assert (path_prefix / "variables.tf").exists()
assert not (
generate_terraform.DATASETS_PATH
/ dataset_path.name
/ "infra"
/ "terraform.tfvars"
).exists()
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
).exists()
assert not (
generate_terraform.DATASETS_PATH / dataset_path.name / "infra" / "backend.tf"
).exists()
assert (ENV_DATASETS_PATH / dataset_path.name / "infra" / "backend.tf").exists()
def test_main_without_tf_remote_state_generates_tf_files_except_backend_tf(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / "provider.tf").exists()
assert (path_prefix / f"{dataset_path.name}_dataset.tf").exists()
assert (path_prefix / f"{pipeline_path.name}_pipeline.tf").exists()
assert (path_prefix / "variables.tf").exists()
assert not (path_prefix / "backend.tf").exists()
assert not (
generate_terraform.DATASETS_PATH
/ dataset_path.name
/ "infra"
/ "terraform.tfvars"
).exists()
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
).exists()
pipeline_path_2 = pipeline_path
def test_main_with_multiple_pipelines(
dataset_path,
pipeline_path,
pipeline_path_2,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
assert pipeline_path.name != pipeline_path_2.name
shutil.copyfile(FILE_PATHS["dataset"], dataset_path / "pipelines" / "dataset.yaml")
shutil.copyfile(FILE_PATHS["pipeline"], pipeline_path / "pipeline.yaml")
shutil.copyfile(FILE_PATHS["pipeline"], pipeline_path_2 / "pipeline.yaml")
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / "provider.tf").exists()
assert (path_prefix / f"{dataset_path.name}_dataset.tf").exists()
assert (path_prefix / f"{pipeline_path.name}_pipeline.tf").exists()
assert (path_prefix / f"{pipeline_path_2.name}_pipeline.tf").exists()
assert (path_prefix / "variables.tf").exists()
assert not (
generate_terraform.DATASETS_PATH
/ dataset_path.name
/ "infra"
/ "terraform.tfvars"
).exists()
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
).exists()
assert not (
generate_terraform.DATASETS_PATH / dataset_path.name / "infra" / "backend.tf"
).exists()
assert (ENV_DATASETS_PATH / dataset_path.name / "infra" / "backend.tf").exists()
def test_main_with_multiple_bq_dataset_ids(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
# First, declare an additional custom BQ dataset in dataset.yaml
another_dataset_id = "another_dataset"
assert another_dataset_id != dataset_path.name
dataset_config = yaml.load(dataset_path / "pipelines" / "dataset.yaml")
dataset_config["resources"].append(
{"type": "bigquery_dataset", "dataset_id": another_dataset_id}
)
yaml.dump(dataset_config, dataset_path / "pipelines" / "dataset.yaml")
# Then, add a BQ table under the additional BQ dataset
pipeline_config = yaml.load(pipeline_path / "pipeline.yaml")
pipeline_config["resources"].append(
{
"type": "bigquery_table",
"table_id": "another_table",
"dataset_id": another_dataset_id,
}
)
yaml.dump(pipeline_config, pipeline_path / "pipeline.yaml")
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / f"{dataset_path.name}_dataset.tf").exists()
assert (path_prefix / f"{pipeline_path.name}_pipeline.tf").exists()
# Match the "google_bigquery_dataset" properties, i.e. any lines between the
# curly braces, in the *_dataset.tf file
regexp = r"\"google_bigquery_dataset\" \"" + r"[A-Za-z0-9_]+" + r"\" \{(.*?)\}"
bq_dataset_tf_string = re.compile(regexp, flags=re.MULTILINE | re.DOTALL)
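    # Illustrative example (not taken from the generated files): for a block like
    #   resource "google_bigquery_dataset" "my_dataset" {
    #     dataset_id  = "my_dataset"
    #     ...
    #   }
    # the capture group holds the lines between the braces, from which the
    # dataset_id values are collected below.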
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
matches = bq_dataset_tf_string.findall(
(path_prefix / f"{dataset_path.name}_dataset.tf").read_text()
)
dataset_ids = set()
for match in matches:
result = re.search(r"dataset_id\s+\=\s+\"([A-Za-z0-9_]+)\"", match)
assert result.group(1)
dataset_ids.add(result.group(1))
# Assert that the dataset_ids are unique
assert len(dataset_ids) == len(matches)
assert another_dataset_id in dataset_ids
assert dataset_path.name in dataset_ids
def test_dataset_without_any_pipelines(
dataset_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
(dataset_path / "pipelines").mkdir(parents=True)
shutil.copyfile(FILE_PATHS["dataset"], dataset_path / "pipelines" / "dataset.yaml")
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / "provider.tf").exists()
assert (path_prefix / f"{dataset_path.name}_dataset.tf").exists()
assert not (
generate_terraform.DATASETS_PATH
/ dataset_path.name
/ "infra"
/ "terraform.tfvars"
).exists()
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
).exists()
assert not (
generate_terraform.DATASETS_PATH / dataset_path.name / "infra" / "backend.tf"
).exists()
assert (ENV_DATASETS_PATH / dataset_path.name / "infra" / "backend.tf").exists()
def test_dataset_path_does_not_exist(
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
with pytest.raises(FileNotFoundError):
generate_terraform.main(
"non_existing_dir",
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
)
def test_generated_tf_files_contain_license_headers(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
tf_state_bucket,
tf_state_prefix,
)
license_header = pathlib.Path(
generate_terraform.TEMPLATE_PATHS["license"]
).read_text()
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
assert (path_prefix / "provider.tf").read_text().count(license_header) == 1
assert (path_prefix / f"{dataset_path.name}_dataset.tf").read_text().count(
license_header
) == 1
assert (path_prefix / f"{pipeline_path.name}_pipeline.tf").read_text().count(
license_header
) == 1
assert (path_prefix / "variables.tf").read_text().count(license_header) == 1
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "terraform.tfvars"
).read_text().count(license_header) == 1
assert (
ENV_DATASETS_PATH / dataset_path.name / "infra" / "backend.tf"
).read_text().count(license_header) == 1
def test_dataset_tf_file_contains_description_when_specified(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
)
config = yaml.load(open(dataset_path / "pipelines" / "dataset.yaml"))
bq_dataset = next(
(r for r in config["resources"] if r["type"] == "bigquery_dataset"), None
)
assert bq_dataset
assert bq_dataset["description"]
# Match the "google_bigquery_dataset" properties, i.e. any lines between the
# curly braces, in the *_dataset.tf file
regexp = r"\"google_bigquery_dataset\" \"" + dataset_path.name + r"\" \{(.*?)\}"
bq_dataset_tf_string = re.compile(regexp, flags=re.MULTILINE | re.DOTALL)
for path_prefix in (
ENV_DATASETS_PATH / dataset_path.name / "infra",
generate_terraform.DATASETS_PATH / dataset_path.name / "infra",
):
result = bq_dataset_tf_string.search(
(path_prefix / f"{dataset_path.name}_dataset.tf").read_text()
)
assert re.search(r"dataset_id\s+\=", result.group(1))
assert re.search(r"description\s+\=", result.group(1))
def test_bq_dataset_can_have_a_description_with_newlines_and_quotes(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
shutil.copyfile(FILE_PATHS["dataset"], dataset_path / "pipelines" / "dataset.yaml")
shutil.copyfile(FILE_PATHS["pipeline"], pipeline_path / "pipeline.yaml")
config = yaml.load(open(dataset_path / "pipelines" / "dataset.yaml"))
# Get a bigquery_dataset resource and modify the `description` field
bq_dataset = next(
(r for r in config["resources"] if r["type"] == "bigquery_dataset"), None
)
test_description = 'Multiline\nstring with\n"quotes"'
bq_dataset["description"] = test_description
with open(dataset_path / "pipelines" / "dataset.yaml", "w") as file:
yaml.dump(config, file)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
)
env_dataset_path = ENV_DATASETS_PATH / dataset_path.name
subprocess.check_call(["terraform", "fmt"], cwd=env_dataset_path / "infra")
def test_dataset_tf_has_no_bq_dataset_description_when_unspecified(
dataset_path,
pipeline_path,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
):
set_dataset_ids_in_config_files(dataset_path, pipeline_path)
config = yaml.load(open(dataset_path / "pipelines" / "dataset.yaml"))
# Get the first bigquery_dataset resource and delete the `description` field
bq_dataset = next(
(r for r in config["resources"] if r["type"] == "bigquery_dataset")
)
del bq_dataset["description"]
with open(dataset_path / "pipelines" / "dataset.yaml", "w") as file:
yaml.dump(config, file)
generate_terraform.main(
dataset_path.name,
project_id,
bucket_name_prefix,
region,
impersonating_acct,
env,
None,
None,
)
# Match the "google_bigquery_dataset" properties, i.e. any lines between the
# curly braces, in the *_dataset.tf file
regexp = r"\"google_bigquery_dataset\" \"" + | |
# Source repository: TJConnellyContingentMacro/northwestern
# 1. A famous researcher observed that chimpanzees hunt and eat meat as part of their regular diet.
# Sometimes chimpanzees hunt alone, while other times they form hunting parties. The following table
# summarizes research on chimpanzee hunting parties, giving the size of the hunting party and the percentage
# of successful hunts. Use Python to graph the data and find the least squares line. Then use the equation
# to predict the percentage of successful hunts, and the rate that percentage is changing, if there are 20
# chimpanzees in a hunting party.
import numpy as np
import matplotlib.pyplot as plt
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
y = np.array([15, 25, 23, 39, 35, 53, 40, 57, 60, 58, 70, 70, 73, 70, 70, 77])
A = np.vstack([x, np.ones(len(x))]).T
print(A)
m, c = np.linalg.lstsq(A, y)[0]
print(m, c)
x2 = np.append([x], [20])
y2 = np.append([y], [m * 20 + c])
plt.plot(x, y, 'o', label = 'Data', markersize = 6)
plt.plot(x2, m * x2 + c, 'b', label = 'Fitted')
plt.plot(20, m * 20 + c, 'rs')
plt.legend()
plt.show()
print('Expected Percentage with 20 Chimps:', m * 20 + c)
print('Rate of change:', m)
# 2. One gram of soybean meal provides at least 2.5 units of vitamins and 5 calories.
# One gram of meat byproducts provides at least 4.5 units of vitamins and 3 calories.
# One gram of grain provides at least 5 units of vitamins and 10 calories. If a gram of
# soybean costs 6 cents, a gram of meat byproducts costs 7 cents, and a gram of grain costs 8 cents,
# use Python to determine what mixture of the three ingredients will provide at least 48 units of vitamins
# and 54 calories per serving at a minimum cost? What will be the minimum cost?
from scipy.optimize import linprog as lp
import numpy as np
# minimize: 6x + 7y + 8z
# subject to:
# 2.5x + 4.5y + 5z >= 48
# 5x + 3y + 10z >= 54
# x, y, z >= 0
A = np.array([[-2.5, -4.5, -5], [-5, -3, -10]])
b = np.array([-48, -54])
buy = lp(np.array([6, 7, 8]), A, b)
print('Buy', buy.x[0], 'grams soybean,',
buy.x[1], 'grams meat byproducts, and',
buy.x[2], 'grams grain.')
print('Minimum per serving cost = $',
sum(buy.x * np.array([6, 7, 8])) / 100)
# 3. A new test has been developed to detect a particular type of cancer. The test must be evaluated before it is put
# into use. A medical researcher selects a random sample of 1,000 adults and finds (by other means) that 4% have this
# type of cancer. Each of the 1,000 adults is given the new test, and it is found that the test indicates cancer in 99%
# of those who have it and in 1% of those who do not.
# a) Based on these results, what is the probability of a randomly chosen person having cancer given that the test indicates cancer?
# b) What is the probability of a person having cancer given that the test does not indicate cancer?
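# Worked equation (for reference): by Bayes' theorem,
# P(cancer | positive) = P(cancer) * P(positive | cancer) /
#   [P(cancer) * P(positive | cancer) + P(no cancer) * P(positive | no cancer)],
# which is exactly what the a / (a + b) computation below evaluates.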
has_cancer = 0.04
true_pos = 0.99
false_pos = 0.01
a = has_cancer * true_pos
b = (1 - has_cancer) * false_pos
prob = a / (a + b)
print("a) Probability that person has cancer given that the test indicates cancer:",
round(prob * 100, 3), "%.")
a = has_cancer * (1 - true_pos) # False neg
b = (1 - has_cancer) * (1 - false_pos) # True neg
prob = a / (a + b)
print("b) Probability that person has cancer given that the test doesn't indicates cancer:",
round(prob * 100, 3), "%.")
# 4. The following is a graph of a third-degree polynomial with leading coefficient 1. Determine the function depicted
# in the graph. Using Python, recreate the graph of the original function, 𝑓(𝑥), as well as the graph of its first and
# second derivatives.
import numpy as np
import matplotlib.pyplot as plt
def f(x):
y = (x + 3) * (x + 1) * (x - 2)
return (y)
def g(x):
#f(x) = x^3 + 2x^2 - 5x - 6
y = 3 * x ** 2 + 4 * x - 5
return(y)
def h(x):
y = 6 * x + 4
return(y)
x = np.arange(-4, 4.5, 0.5)
plt.plot(x, f(x), 'b', label = 'y = f(x)')
plt.plot(x, g(x), 'b-.', label = "y = f'(x)")
plt.plot(x, h(x), 'b--', label = "y = f''(x)")
plt.plot( 1, f( 1), 'ys')
plt.plot(-2, f(-2), 'ys')
plt.plot( 2, f( 2), 'rs')
plt.plot(-1, f(-1), 'rs')
plt.plot(-3, f(-3), 'rs')
plt.plot(x, x * 0, 'g')
plt.legend()
plt.show()
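# Confirm (illustrative cross-check): the derivative coefficients can also be produced with
# np.polyder from the expanded form f(x) = x^3 + 2x^2 - 5x - 6.
coeffs = np.array([1, 2, -5, -6])
print("f'(x) coefficients:", np.polyder(coeffs))      # expect [ 3  4 -5]
print("f''(x) coefficients:", np.polyder(coeffs, 2))  # expect [6 4]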
# 5. For a certain drug, the rate of reaction in appropriate units is given by 𝑅'(𝑡) = 4/(𝑡+1) + 3/sqrt(𝑡+1), where 𝑡 is time (in hours)
# after the drug is administered. Calculate the total reaction to the drug from 𝑡=1 to 𝑡=12.
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate
def f(t):
y = 4 * np.log(1 + t) + 6 * np.sqrt(1 + t)
return (y)
def g(t):
y = 4 / (t + 1) + 3 / (np.sqrt(1 + t))
return (y)
t = np.arange(1, 12.5, 0.5)
plt.plot(t, f(t), 'b', label = 'R(t)')
plt.plot(t, g(t), 'r', label = "R'(t)")
plt.legend()
plt.show()
total = f(12) - f(1)
print(total)
# Confirm
func = lambda t: 4 / (t + 1) + 3 / (np.sqrt(1 + t))
total = integrate.quad(func, 1, 12)
print(total[0])
print("Total reaction from t=1 to t=12 is",
round(total[0], 4))
# 6. The nationwide attendance per day for a certain summer blockbuster can be approximated using the equation
# 𝐴(𝑡)=13𝑡^2𝑒^-t, where A is the attendance per day in thousands of people and t is the number of months since the
# release of the film. Find the rate of change of the daily attendance after 4 months and interpret
# the result.
import numpy as np
from scipy.misc import derivative
import matplotlib.pyplot as plt
def A(t):
y = 13 * t**2 * np.exp(-t)
return (y)
h = 1e-5
a = (A(4+h) - A(4)) / h
print(a)
print(A(np.arange(0, 6, 1)))
t = np.arange(0, 10, 1)
plt.plot(t, A(t), 'b', label = 'A(t)')
plt.plot(t, derivative(A, t, dx = 1e-5), 'r--', label = "A'(t)")
plt.legend()
plt.show()
# Confirm
der = derivative(A, 4, dx = 1e-5)
print(der)
print("Rate of change at t=4", der)
# 7. The population of mathematicians in the eastern part of Evanston is given by the formula 𝑃(𝑡)=(𝑡^2+100) * ln(𝑡+2),
# where t represents the time in years since 2000. Using Python, find the rate of change of this population in 2006.
import numpy as np
from scipy.misc import derivative
import matplotlib.pyplot as plt
def P(t):
y = (t**2 + 100) * np.log(t + 2)
return (y)
h = 1e-5
a = (P(6 + h) - P(6)) / h
print(a)
t = np.arange(0, 10, 1)
plt.plot(t, P(t), 'b', label = 'P(t)')
plt.plot(t, derivative(P, t, dx = 1e-5), 'r--', label = "P'(t)")
plt.legend()
plt.show()
# Confirm
der = derivative(P, 6, dx = 1e-5)
print(der)
print("Rate of change at t=2006", der)
# 8. The rate of change in a person's body temperature, with respect to the dosage of x milligrams of a certain diuretic,
# is given by 𝐷′(x) = 2 / (x+6). One milligram raises the temperature 2.2°C. Find the function giving the total
# temperature change. If someone takes 5 milligrams of this diuretic what will their body temperature be, assuming they
# start off at a normal temperature of 37°C?
import numpy as np
import matplotlib.pyplot as plt
def f(x):
y = 2 * np.log(x + 6)
return (y)
def g(x):
y = 2 / (x + 6)
return (y)
temp = 37
C = 2.2 - f(1)
t = np.arange(0, 5.5, 0.5)
plt.plot(t, f(t) + C, 'b', label = 'D(x)')
plt.plot(t, g(t), 'r--', label = "D'(x)")
plt.legend()
plt.show()
new_temp = f(5) + C + temp
print(new_temp)
print("Temp after taking 5 milligrams=", new_temp)
# 9. The following function represents the life of a lightbulb in years based on average consumer usage.
# Show 𝑓(𝑥) is a probability density function on [0,∞).
# 𝑓(𝑥) = x^3/12, if 0 <= x <= 2
# 𝑓(𝑥) = 16/x^4, if x > 2
# Determine the probability a lightbulb will last between 1 and 5 years.
import numpy as np
import matplotlib.pyplot as plt
x1 = np.arange(0, 2.1, 0.1)
x2 = np.arange(2.1, 20, 0.1)
def f(x):
y = x**3 / 12
    return (y)
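# Illustrative completion (an assumption — the original script is truncated at this point):
# check that f integrates to 1 on [0, inf) and compute P(1 <= lifetime <= 5) piecewise.
from scipy import integrate
def g(x):
    y = 16 / x ** 4
    return (y)
area = integrate.quad(f, 0, 2)[0] + integrate.quad(g, 2, np.inf)[0]
print('Total area under f (should be 1):', round(area, 4))
prob = integrate.quad(f, 1, 2)[0] + integrate.quad(g, 2, 5)[0]
print('P(1 <= lifetime <= 5):', round(prob, 4))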
#
# Copyright (c) 2001 Bizar Software Pty Ltd (http://www.bizarsoftware.com.au/)
# This module is free software, and you may redistribute it and/or modify
# under the same terms as Python, so long as this copyright message and
# disclaimer are retained in their original form.
#
# IN NO EVENT SHALL BIZAR SOFTWARE PTY LTD BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING
# OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# BIZAR SOFTWARE PTY LTD SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS"
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
""" Relational database (SQL) backend common code.
Basics:
- map roundup classes to relational tables
- automatically detect schema changes and modify the table schemas
appropriately (we store the "database version" of the schema in the
database itself as the only row of the "schema" table)
- multilinks (which represent a many-to-many relationship) are handled through
intermediate tables
- journals are stored adjunct to the per-class tables
- table names and columns have "_" prepended so the names can't clash with
restricted names (like "order")
- retirement is determined by the __retired__ column being > 0
Database-specific changes may generally be pushed out to the overridable
sql_* methods, since everything else should be fairly generic. There's
probably a bit of work to be done if a database is used that actually
honors column typing, since the initial databases don't (sqlite stores
everything as a string.)
The schema of the hyperdb being mapped to the database is stored in the
database itself as a repr()'ed dictionary of information about each Class
that maps to a table. If that information differs from the hyperdb schema,
then we update it. We also store in the schema dict a version which
allows us to upgrade the database schema when necessary. See upgrade_db().
To force a uniqueness constraint on the key properties we put the item
id into the __retired__ column during retirement (so it's 0 for "active"
items) and place a uniqueness constraint on key + __retired__. This is
particularly important for the users class where multiple users may
try to have the same username, with potentially many retired users with
the same name.
"""
__docformat__ = 'restructuredtext'
# standard python modules
import os, time, re, weakref, copy, logging, datetime
# roundup modules
from roundup import hyperdb, date, password, roundupdb, security, support
from roundup.hyperdb import String, Password, Date, Interval, Link, \
Multilink, DatabaseError, Boolean, Number, Integer
from roundup.i18n import _
# support
from roundup.backends.blobfiles import FileStorage
from roundup.backends.indexer_common import get_indexer
from roundup.backends.sessions_rdbms import Sessions, OneTimeKeys
from roundup.date import Range
from roundup.backends.back_anydbm import compile_expression
from roundup.anypy.strings import b2s, bs2b, us2s, repr_export, eval_import
from hashlib import md5
# dummy value meaning "argument not passed"
_marker = []
def _num_cvt(num):
num = str(num)
try:
return int(num)
except ValueError:
return float(num)
def _bool_cvt(value):
if value in ('TRUE', 'FALSE'):
return {'TRUE': 1, 'FALSE': 0}[value]
# assume it's a number returned from the db API
return int(value)
def date_to_hyperdb_value(d):
""" convert date d to a roundup date """
if isinstance(d, datetime.datetime):
return date.Date(d)
return date.Date(str(d).replace(' ', '.'))
def connection_dict(config, dbnamestr=None):
""" Used by Postgresql and MySQL to detemine the keyword args for
opening the database connection."""
d = {}
if dbnamestr:
d[dbnamestr] = config.RDBMS_NAME
for name in ('host', 'port', 'password', 'user', 'read_default_group',
'read_default_file'):
cvar = 'RDBMS_'+name.upper()
if config[cvar] is not None:
d[name] = config[cvar]
return d
class IdListOptimizer:
""" To prevent flooding the SQL parser of the underlaying
db engine with "x IN (1, 2, 3, ..., <large number>)" collapses
these cases to "x BETWEEN 1 AND <large number>".
"""
def __init__(self):
self.ranges = []
self.singles = []
def append(self, nid):
""" Invariant: nid are ordered ascending """
if self.ranges:
last = self.ranges[-1]
if last[1] == nid-1:
last[1] = nid
return
if self.singles:
last = self.singles[-1]
if last == nid-1:
self.singles.pop()
self.ranges.append([last, nid])
return
self.singles.append(nid)
def where(self, field, placeholder):
ranges = self.ranges
singles = self.singles
if not singles and not ranges: return "(1=0)", []
if ranges:
between = '%s BETWEEN %s AND %s' % (
field, placeholder, placeholder)
stmnt = [between] * len(ranges)
else:
stmnt = []
if singles:
stmnt.append('%s in (%s)' % (
field, ','.join([placeholder]*len(singles))))
return '(%s)' % ' OR '.join(stmnt), sum(ranges, []) + singles
def __str__(self):
return "ranges: %r / singles: %r" % (self.ranges, self.singles)
class Database(FileStorage, hyperdb.Database, roundupdb.Database):
""" Wrapper around an SQL database that presents a hyperdb interface.
- some functionality is specific to the actual SQL database, hence
the sql_* methods that are NotImplemented
- we keep a cache of the latest N row fetches (where N is
configurable).
"""
def __init__(self, config, journaltag=None):
""" Open the database and load the schema from it.
"""
FileStorage.__init__(self, config.UMASK)
self.config, self.journaltag = config, journaltag
self.dir = config.DATABASE
self.classes = {}
self.indexer = get_indexer(config, self)
self.security = security.Security(self)
# additional transaction support for external files and the like
self.transactions = []
# keep a cache of the N most recently retrieved rows of any kind
# (classname, nodeid) = row
self.cache_size = config.RDBMS_CACHE_SIZE
self.clearCache()
self.stats = {'cache_hits': 0, 'cache_misses': 0, 'get_items': 0,
'filtering': 0}
# make sure the database directory exists
if not os.path.isdir(self.config.DATABASE):
os.makedirs(self.config.DATABASE)
# database lock
self.lockfile = None
# Uppercase to not collide with Class names
self.Session = None
self.Otk = None
# open a connection to the database, creating the "conn" attribute
self.open_connection()
def clearCache(self):
self.cache = {}
self.cache_lru = []
# upcall is necessary!
roundupdb.Database.clearCache(self)
def getSessionManager(self):
if not self.Session:
self.Session = Sessions(self)
return self.Session
def getOTKManager(self):
if not self.Otk:
self.Otk = OneTimeKeys(self)
return self.Otk
def open_connection(self):
""" Open a connection to the database, creating it if necessary.
Must call self.load_dbschema()
"""
raise NotImplementedError
def sql(self, sql, args=None, cursor=None):
""" Execute the sql with the optional args.
"""
self.log_debug('SQL %r %r' % (sql, args))
if not cursor:
cursor = self.cursor
if args:
cursor.execute(sql, args)
else:
cursor.execute(sql)
def sql_fetchone(self):
""" Fetch a single row. If there's nothing to fetch, return None.
"""
return self.cursor.fetchone()
def sql_fetchall(self):
""" Fetch all rows. If there's nothing to fetch, return [].
"""
return self.cursor.fetchall()
def sql_fetchiter(self):
""" Fetch all row as a generator
"""
while True:
row = self.cursor.fetchone()
if not row: break
yield row
def search_stringquote(self, value):
""" Quote a search string to escape magic search characters
'%' and '_', also need to quote '\' (first)
Then put '%' around resulting string for LIKE (or ILIKE) operator
"""
v = value.replace('\\', '\\\\')
v = v.replace('%', '\\%')
v = v.replace('_', '\\_')
return '%' + v + '%'
def init_dbschema(self):
self.database_schema = {
'version': self.current_db_version,
'tables': {}
}
def load_dbschema(self):
""" Load the schema definition that the database currently implements
"""
self.cursor.execute('select schema from schema')
schema = self.cursor.fetchone()
if schema:
# bandit - schema is trusted
self.database_schema = eval(schema[0]) # nosec
else:
self.database_schema = {}
def save_dbschema(self):
""" Save the schema definition that the database currently implements
"""
s = repr(self.database_schema)
self.sql('delete from schema')
self.sql('insert into schema values (%s)' % self.arg, (s,))
def post_init(self):
""" Called once the schema initialisation has finished.
We should now confirm that the schema defined by our "classes"
attribute actually matches the schema in the database.
"""
super(Database, self).post_init()
# upgrade the database for column type changes, new internal
# tables, etc.
save = self.upgrade_db()
# handle changes in the schema
tables = self.database_schema['tables']
for classname, spec in self.classes.items():
if classname in tables:
dbspec = tables[classname]
if self.update_class(spec, dbspec):
tables[classname] = spec.schema()
save = 1
else:
self.create_class(spec)
tables[classname] = spec.schema()
save = 1
for classname, _spec in list(tables.items()):
if classname not in self.classes:
self.drop_class(classname, tables[classname])
del tables[classname]
save = 1
# update the database version of the schema
if save:
self.save_dbschema()
# reindex the db if necessary
if self.indexer.should_reindex():
self.reindex()
# commit
self.sql_commit()
# update this number when we need to make changes to the SQL structure
    # of the backend database
'subsector', 'name', 'min_lifetime', 'max_lifetime', 'source', 'additional_description', 'demand_tech_unit_type', 'unit', 'time_unit', 'cost_of_capital', 'stock_decay_function', 'mean_lifetime', 'lifetime_variance', 'shape', 'max_lead_hours', 'max_lag_hours']
def __init__(self, scenario, linked=None, stock_link_ratio=None, subsector=None, name=None, min_lifetime=None,
max_lifetime=None, source=None, additional_description=None, demand_tech_unit_type=None,
unit=None, time_unit=None, cost_of_capital=None, stock_decay_function=None,
mean_lifetime=None, lifetime_variance=None, shape=None, max_lead_hours=None,
max_lag_hours=None):
DataObject.__init__(self, scenario, name, "")
DemandTechs._instances_by_key[self._key] = self
self.linked = linked
self.stock_link_ratio = stock_link_ratio
self.subsector = subsector
self.name = name
self.min_lifetime = min_lifetime
self.max_lifetime = max_lifetime
self.source = source
self.additional_description = additional_description
self.demand_tech_unit_type = demand_tech_unit_type
self.unit = unit
self.time_unit = time_unit
self.cost_of_capital = cost_of_capital
self.stock_decay_function = stock_decay_function
self.mean_lifetime = mean_lifetime
self.lifetime_variance = lifetime_variance
self.shape = shape
self.max_lead_hours = max_lead_hours
self.max_lag_hours = max_lag_hours
@classmethod
def from_tuple(cls, scenario, tup, **kwargs):
(linked, stock_link_ratio, subsector, name, min_lifetime, max_lifetime, source,
additional_description, demand_tech_unit_type, unit, time_unit, cost_of_capital,
stock_decay_function, mean_lifetime, lifetime_variance, shape, max_lead_hours,
max_lag_hours) = tup
obj = cls(scenario, linked=linked, stock_link_ratio=stock_link_ratio, subsector=subsector, name=name,
min_lifetime=min_lifetime, max_lifetime=max_lifetime, source=source,
additional_description=additional_description,
demand_tech_unit_type=demand_tech_unit_type, unit=unit, time_unit=time_unit,
cost_of_capital=cost_of_capital, stock_decay_function=stock_decay_function,
mean_lifetime=mean_lifetime, lifetime_variance=lifetime_variance, shape=shape,
max_lead_hours=max_lead_hours, max_lag_hours=max_lag_hours)
return obj
class DemandTechsAuxEfficiency(DataObject):
_instances_by_key = {}
_key_col = "demand_technology"
_cols = ['demand_technology', 'definition', 'reference_tech_id', 'geography', 'other_index_1', 'other_index_2', 'final_energy', 'demand_tech_efficiency_types', 'is_numerator_service', 'numerator_unit', 'denominator_unit', 'interpolation_method', 'extrapolation_method', 'extrapolation_growth', 'age_growth_or_decay_type', 'age_growth_or_decay', 'shape']
def __init__(self, scenario, demand_technology=None, definition=None, reference_tech_id=None, geography=None,
other_index_1=None, other_index_2=None, final_energy=None,
demand_tech_efficiency_types=None, is_numerator_service=None, numerator_unit=None,
denominator_unit=None, interpolation_method=None, extrapolation_method=None,
extrapolation_growth=None, age_growth_or_decay_type=None, age_growth_or_decay=None,
shape=None):
DataObject.__init__(self, scenario, demand_technology, "DemandTechsAuxEfficiencyData")
DemandTechsAuxEfficiency._instances_by_key[self._key] = self
self.demand_technology = demand_technology
self.definition = definition
self.reference_tech_id = reference_tech_id
self.geography = geography
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.final_energy = final_energy
self.demand_tech_efficiency_types = demand_tech_efficiency_types
self.is_numerator_service = is_numerator_service
self.numerator_unit = numerator_unit
self.denominator_unit = denominator_unit
self.interpolation_method = interpolation_method
self.extrapolation_method = extrapolation_method
self.extrapolation_growth = extrapolation_growth
self.age_growth_or_decay_type = age_growth_or_decay_type
self.age_growth_or_decay = age_growth_or_decay
self.shape = shape
@classmethod
def from_tuple(cls, scenario, tup, **kwargs):
(demand_technology, definition, reference_tech_id, geography, other_index_1, other_index_2,
final_energy, demand_tech_efficiency_types, is_numerator_service, numerator_unit,
denominator_unit, interpolation_method, extrapolation_method, extrapolation_growth,
age_growth_or_decay_type, age_growth_or_decay, shape) = tup
obj = cls(scenario, demand_technology=demand_technology, definition=definition,
reference_tech_id=reference_tech_id, geography=geography, other_index_1=other_index_1,
other_index_2=other_index_2, final_energy=final_energy,
demand_tech_efficiency_types=demand_tech_efficiency_types,
is_numerator_service=is_numerator_service, numerator_unit=numerator_unit,
denominator_unit=denominator_unit, interpolation_method=interpolation_method,
extrapolation_method=extrapolation_method, extrapolation_growth=extrapolation_growth,
age_growth_or_decay_type=age_growth_or_decay_type,
age_growth_or_decay=age_growth_or_decay, shape=shape)
return obj
class DemandTechsCapitalCost(DataObject):
_instances_by_key = {}
_key_col = "demand_technology"
_cols = ['demand_technology', 'definition', 'reference_tech_id', 'geography', 'other_index_1', 'other_index_2', 'currency', 'currency_year_id', 'is_levelized', 'interpolation_method', 'extrapolation_method', 'extrapolation_growth', 'reference_tech_operation']
def __init__(self, scenario, demand_technology=None, definition=None, reference_tech_id=None, geography=None,
other_index_1=None, other_index_2=None, currency=None, currency_year_id=None,
is_levelized=None, interpolation_method=None, extrapolation_method=None,
extrapolation_growth=None, reference_tech_operation=None):
DataObject.__init__(self, scenario, demand_technology, "")
DemandTechsCapitalCost._instances_by_key[self._key] = self
self.demand_technology = demand_technology
self.definition = definition
self.reference_tech_id = reference_tech_id
self.geography = geography
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.currency = currency
self.currency_year_id = currency_year_id
self.is_levelized = is_levelized
self.interpolation_method = interpolation_method
self.extrapolation_method = extrapolation_method
self.extrapolation_growth = extrapolation_growth
self.reference_tech_operation = reference_tech_operation
@classmethod
def from_tuple(cls, scenario, tup, **kwargs):
(demand_technology, definition, reference_tech_id, geography, other_index_1, other_index_2,
currency, currency_year_id, is_levelized, interpolation_method, extrapolation_method,
extrapolation_growth, reference_tech_operation) = tup
obj = cls(scenario, demand_technology=demand_technology, definition=definition,
reference_tech_id=reference_tech_id, geography=geography, other_index_1=other_index_1,
other_index_2=other_index_2, currency=currency, currency_year_id=currency_year_id,
is_levelized=is_levelized, interpolation_method=interpolation_method,
extrapolation_method=extrapolation_method, extrapolation_growth=extrapolation_growth,
reference_tech_operation=reference_tech_operation)
return obj
class DemandTechsFixedMaintenanceCost(DataObject):
_instances_by_key = {}
_key_col = "demand_technology"
_cols = ['demand_technology', 'definition', 'reference_tech_id', 'geography', 'other_index_1', 'other_index_2', 'currency', 'currency_year_id', 'interpolation_method', 'extrapolation_method', 'extrapolation_growth', 'age_growth_or_decay_type', 'age_growth_or_decay', 'additional_description']
def __init__(self, scenario, demand_technology=None, definition=None, reference_tech_id=None, geography=None,
other_index_1=None, other_index_2=None, currency=None, currency_year_id=None,
interpolation_method=None, extrapolation_method=None, extrapolation_growth=None,
age_growth_or_decay_type=None, age_growth_or_decay=None, additional_description=None):
DataObject.__init__(self, scenario, demand_technology, "DemandTechsFixedMaintenanceCostData")
DemandTechsFixedMaintenanceCost._instances_by_key[self._key] = self
self.demand_technology = demand_technology
self.definition = definition
self.reference_tech_id = reference_tech_id
self.geography = geography
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.currency = currency
self.currency_year_id = currency_year_id
self.interpolation_method = interpolation_method
self.extrapolation_method = extrapolation_method
self.extrapolation_growth = extrapolation_growth
self.age_growth_or_decay_type = age_growth_or_decay_type
self.age_growth_or_decay = age_growth_or_decay
self.additional_description = additional_description
@classmethod
def from_tuple(cls, scenario, tup, **kwargs):
(demand_technology, definition, reference_tech_id, geography, other_index_1, other_index_2,
currency, currency_year_id, interpolation_method, extrapolation_method,
extrapolation_growth, age_growth_or_decay_type, age_growth_or_decay,
additional_description) = tup
obj = cls(scenario, demand_technology=demand_technology, definition=definition,
reference_tech_id=reference_tech_id, geography=geography, other_index_1=other_index_1,
other_index_2=other_index_2, currency=currency, currency_year_id=currency_year_id,
interpolation_method=interpolation_method, extrapolation_method=extrapolation_method,
extrapolation_growth=extrapolation_growth,
age_growth_or_decay_type=age_growth_or_decay_type,
age_growth_or_decay=age_growth_or_decay, additional_description=additional_description)
return obj
class DemandTechsFuelSwitchCost(DataObject):
_instances_by_key = {}
_key_col = "demand_technology"
_cols = ['demand_technology', 'definition', 'reference_tech_id', 'geography', 'other_index_1', 'other_index_2', 'currency', 'currency_year_id', 'is_levelized', 'interpolation_method', 'extrapolation_method', 'extrapolation_growth']
def __init__(self, scenario, demand_technology=None, definition=None, reference_tech_id=None, geography=None,
other_index_1=None, other_index_2=None, currency=None, currency_year_id=None,
is_levelized=None, interpolation_method=None, extrapolation_method=None,
extrapolation_growth=None):
DataObject.__init__(self, scenario, demand_technology, "DemandTechsFuelSwitchCostData")
DemandTechsFuelSwitchCost._instances_by_key[self._key] = self
self.demand_technology = demand_technology
self.definition = definition
self.reference_tech_id = reference_tech_id
self.geography = geography
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.currency = currency
self.currency_year_id = currency_year_id
self.is_levelized = is_levelized
self.interpolation_method = interpolation_method
self.extrapolation_method = extrapolation_method
self.extrapolation_growth = extrapolation_growth
@classmethod
def from_tuple(cls, scenario, tup, **kwargs):
(demand_technology, definition, reference_tech_id, geography, other_index_1, other_index_2,
currency, currency_year_id, is_levelized, interpolation_method, extrapolation_method,
extrapolation_growth) = tup
obj = cls(scenario, demand_technology=demand_technology, definition=definition,
reference_tech_id=reference_tech_id, geography=geography, other_index_1=other_index_1,
other_index_2=other_index_2, currency=currency, currency_year_id=currency_year_id,
is_levelized=is_levelized, interpolation_method=interpolation_method,
extrapolation_method=extrapolation_method, extrapolation_growth=extrapolation_growth)
return obj
class DemandTechsInstallationCost(DataObject):
_instances_by_key = {}
_key_col = "demand_technology"
_cols = ['demand_technology', 'definition', 'reference_tech_id', 'geography', 'other_index_1', 'other_index_2', 'currency', 'currency_year_id', 'is_levelized', 'interpolation_method', 'extrapolation_method', 'extrapolation_growth']
def __init__(self, scenario, demand_technology=None, definition=None, reference_tech_id=None, geography=None,
other_index_1=None, other_index_2=None, currency=None, currency_year_id=None,
is_levelized=None, interpolation_method=None, extrapolation_method=None,
extrapolation_growth=None):
DataObject.__init__(self, scenario, demand_technology, "")
DemandTechsInstallationCost._instances_by_key[self._key] = self
self.demand_technology = demand_technology
self.definition = definition
self.reference_tech_id = reference_tech_id
self.geography = geography
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.currency = currency
self.currency_year_id = currency_year_id
self.is_levelized = is_levelized
self.interpolation_method = interpolation_method
self.extrapolation_method = extrapolation_method
self.extrapolation_growth = extrapolation_growth
@classmethod
def from_tuple(cls, scenario, tup, **kwargs):
(demand_technology, definition, reference_tech_id, geography, other_index_1, other_index_2,
currency, currency_year_id, is_levelized, interpolation_method, extrapolation_method,
extrapolation_growth) = tup
obj = cls(scenario, demand_technology=demand_technology, definition=definition,
reference_tech_id=reference_tech_id, geography=geography, other_index_1=other_index_1,
other_index_2=other_index_2, currency=currency, currency_year_id=currency_year_id,
is_levelized=is_levelized, interpolation_method=interpolation_method,
extrapolation_method=extrapolation_method, extrapolation_growth=extrapolation_growth)
return obj
class DemandTechsMainEfficiency(DataObject):
_instances_by_key = {}
_key_col = "demand_technology"
_cols = ['demand_technology', 'definition', 'reference_tech_id', 'geography', 'other_index_1', 'other_index_2', 'final_energy', 'utility_factor', 'is_numerator_service', 'numerator_unit', 'denominator_unit', 'interpolation_method', 'extrapolation_method', 'extrapolation_growth', 'age_growth_or_decay_type', 'age_growth_or_decay', 'geography_map_key']
def __init__(self, scenario, demand_technology=None, definition=None, reference_tech_id=None, geography=None,
other_index_1=None, other_index_2=None, final_energy=None, utility_factor=None,
is_numerator_service=None, numerator_unit=None, denominator_unit=None,
interpolation_method=None, extrapolation_method=None, extrapolation_growth=None,
age_growth_or_decay_type=None, age_growth_or_decay=None, geography_map_key=None):
DataObject.__init__(self, scenario, demand_technology, "DemandTechsMainEfficiencyData")
DemandTechsMainEfficiency._instances_by_key[self._key] = self
self.demand_technology = demand_technology
self.definition = definition
self.reference_tech_id = reference_tech_id
self.geography = geography
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.final_energy = final_energy
self.utility_factor = utility_factor
self.is_numerator_service = is_numerator_service
self.numerator_unit = numerator_unit
self.denominator_unit = denominator_unit
self.interpolation_method = interpolation_method
self.extrapolation_method = extrapolation_method
self.extrapolation_growth = extrapolation_growth
self.age_growth_or_decay_type = age_growth_or_decay_type
self.age_growth_or_decay = age_growth_or_decay
self.geography_map_key = geography_map_key
@classmethod
def from_tuple(cls, scenario, tup, **kwargs):
(demand_technology, definition, reference_tech_id, geography, other_index_1, other_index_2,
final_energy, utility_factor, is_numerator_service, numerator_unit, denominator_unit,
interpolation_method, extrapolation_method, extrapolation_growth,
age_growth_or_decay_type, age_growth_or_decay, geography_map_key) = tup
obj = cls(scenario, demand_technology=demand_technology, definition=definition,
reference_tech_id=reference_tech_id, geography=geography, other_index_1=other_index_1,
other_index_2=other_index_2, final_energy=final_energy, utility_factor=utility_factor,
is_numerator_service=is_numerator_service, numerator_unit=numerator_unit,
denominator_unit=denominator_unit, interpolation_method=interpolation_method,
extrapolation_method=extrapolation_method, extrapolation_growth=extrapolation_growth,
age_growth_or_decay_type=age_growth_or_decay_type,
age_growth_or_decay=age_growth_or_decay, geography_map_key=geography_map_key)
return obj
class DemandTechsParasiticEnergy(DataObject):
_instances_by_key = {}
_key_col = "demand_technology"
_cols = ['demand_technology', 'definition', 'reference_tech_id', 'geography', 'other_index_1', 'other_index_2', 'energy_unit', 'time_unit', 'interpolation_method', 'extrapolation_method', 'extrapolation_growth', 'age_growth_or_decay_type', 'age_growth_or_decay']
def __init__(self, scenario, demand_technology=None, definition=None, reference_tech_id=None, geography=None,
other_index_1=None, other_index_2=None, energy_unit=None, time_unit=None,
interpolation_method=None, extrapolation_method=None, extrapolation_growth=None,
age_growth_or_decay_type=None, age_growth_or_decay=None):
DataObject.__init__(self, scenario, demand_technology, "DemandTechsParasiticEnergyData")
DemandTechsParasiticEnergy._instances_by_key[self._key] = self
self.demand_technology = demand_technology
self.definition = definition
self.reference_tech_id = reference_tech_id
self.geography = geography
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.energy_unit = energy_unit
self.time_unit = time_unit
self.interpolation_method = interpolation_method
self.extrapolation_method = extrapolation_method
self.extrapolation_growth = extrapolation_growth
self.age_growth_or_decay_type = age_growth_or_decay_type
self.age_growth_or_decay = age_growth_or_decay
@classmethod
def from_tuple(cls, scenario, tup, **kwargs):
(demand_technology, definition, reference_tech_id, geography, other_index_1, other_index_2,
energy_unit, time_unit, interpolation_method, extrapolation_method, extrapolation_growth,
age_growth_or_decay_type, age_growth_or_decay) = tup
obj = cls(scenario, demand_technology=demand_technology, definition=definition,
reference_tech_id=reference_tech_id, geography=geography, other_index_1=other_index_1,
other_index_2=other_index_2, energy_unit=energy_unit, time_unit=time_unit,
interpolation_method=interpolation_method, extrapolation_method=extrapolation_method,
extrapolation_growth=extrapolation_growth,
age_growth_or_decay_type=age_growth_or_decay_type,
age_growth_or_decay=age_growth_or_decay)
return obj
class DemandTechsServiceDemandModifier(DataObject):
_instances_by_key = {}
_key_col = "demand_technology"
_cols = ['demand_technology', 'geography', 'other_index_1', 'other_index_2', 'interpolation_method', 'extrapolation_method', 'extrapolation_growth', 'age_growth_or_decay_type', 'age_growth_or_decay']
def __init__(self, scenario, demand_technology=None, geography=None, other_index_1=None, other_index_2=None,
interpolation_method=None, extrapolation_method=None, extrapolation_growth=None,
age_growth_or_decay_type=None, age_growth_or_decay=None):
DataObject.__init__(self, scenario, demand_technology, "DemandTechsServiceDemandModifierData")
DemandTechsServiceDemandModifier._instances_by_key[self._key] = self
self.demand_technology = demand_technology
self.geography = geography
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.interpolation_method = interpolation_method
self.extrapolation_method = extrapolation_method
self.extrapolation_growth = extrapolation_growth
self.age_growth_or_decay_type = age_growth_or_decay_type
self.age_growth_or_decay = age_growth_or_decay
@classmethod
def from_tuple(cls, scenario, tup, **kwargs):
(demand_technology, geography, other_index_1, other_index_2, interpolation_method,
extrapolation_method, extrapolation_growth, age_growth_or_decay_type, age_growth_or_decay) = tup
obj = cls(scenario, demand_technology=demand_technology, geography=geography, other_index_1=other_index_1,
other_index_2=other_index_2, interpolation_method=interpolation_method,
extrapolation_method=extrapolation_method, extrapolation_growth=extrapolation_growth,
age_growth_or_decay_type=age_growth_or_decay_type,
age_growth_or_decay=age_growth_or_decay)
return obj
class DemandTechsServiceLink(DataObject):
_instances_by_key = {}
_key_col = "demand_technology"
_cols = ['id', 'service_link', 'demand_technology', 'definition', 'reference_id', 'geography', 'other_index_1', 'other_index_2', 'interpolation_method', 'extrapolation_method', 'extrapolation_growth', 'age_growth_or_decay_type', 'age_growth_or_decay']
def __init__(self, scenario, id=None, service_link=None, demand_technology=None, definition=None, reference_id=None,
geography=None, other_index_1=None, other_index_2=None, interpolation_method=None,
extrapolation_method=None, extrapolation_growth=None, age_growth_or_decay_type=None,
age_growth_or_decay=None):
DataObject.__init__(self, scenario, demand_technology, "DemandTechsServiceLinkData")
DemandTechsServiceLink._instances_by_key[self._key] = self
self.id = id
self.service_link = service_link
self.demand_technology = demand_technology
self.definition = definition
self.reference_id = reference_id
self.geography = geography
self.other_index_1 = other_index_1
self.other_index_2 = other_index_2
self.interpolation_method = interpolation_method
self.extrapolation_method = extrapolation_method
self.extrapolation_growth = extrapolation_growth
self.age_growth_or_decay_type = age_growth_or_decay_type
self.age_growth_or_decay = age_growth_or_decay
@classmethod
def from_tuple(cls, scenario, tup, **kwargs):
(id, service_link, demand_technology, definition, reference_id, geography, other_index_1,
other_index_2, interpolation_method, extrapolation_method, extrapolation_growth,
age_growth_or_decay_type, age_growth_or_decay) = tup
obj = cls(scenario, id=id, service_link=service_link, demand_technology=demand_technology,
definition=definition, reference_id=reference_id, geography=geography,
other_index_1=other_index_1, other_index_2=other_index_2,
interpolation_method=interpolation_method, extrapolation_method=extrapolation_method,
| |
width, 4, 5, 6, 7,
8 - width / 2 * 3, 9 - width / 2 * 3]), tr_sur_tv, alpha=0.9, width=width,
hatch='\\', edgecolor='black', label='TR+SUR')
plt.bar(np.array([1, 2, 3, 4 + width, 5 + width, 6 + width, 7 + width,
8 - width / 2, 9 - width / 2]), admm_sur_tv, alpha=0.9, width=width,
hatch='+', edgecolor='black', label='ADMM+SUR')
plt.bar(np.array([8 + width / 2, 9 + width / 2]), pgrape_sur_tv, alpha=0.9, width=width,
hatch='o', edgecolor='black', label='p-GRAPE+SUR')
plt.bar(np.array([1 + width, 2 + width, 3 + width, 8 + width / 2 * 3, 9 + width / 2 * 3]), tr_st_tv, alpha=0.9,
width=width, hatch='.', edgecolor='black', label='TR+ST')
plt.bar(np.array([1 + width * 2, 2 + width * 2, 3 + width * 2, 8 + width / 2 * 5, 9 + width / 2 * 5]), admm_st_tv,
alpha=0.9, width=width, hatch='*', edgecolor='black', label='ADMM+ST')
x_loc = plt.MultipleLocator(1)
ax = plt.gca()
ax.xaxis.set_major_locator(x_loc)
plt.xticks(instance, instance_name)
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
# plt.show()
plt.savefig("../figure_paper/Rounding_all_instances_new.png")
def draw_mt():
plt.figure(figsize=(15, 6), dpi=300)
instance = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
instance_name = ["Energy2", "Energy4", "Energy6", "CNOT5", "CNOT10", "CNOT15", "CNOT20", "CircuitH2", "CircuitLiH"]
grape_mt = [0.841, 0.653728554, 0.316892473, 0.757, 0.842, 0.461, 0.218, 0.291, 0.003]
pgrape_mt = [0.400, 0.037]
tr_mt = [0.841, 0.699677255, 0.382219312, 0.715, 0.677, 0.710, 0.686, 0.409, 0.034]
admm_mt = [0.846, 0.649312043, 0.390978675, 0.475, 0.916, 0.824, 0.483, 0.937, 0.342]
tr_st_mt = [0, 0.837013759, 0.621341738, 0.995, 0.593]
admm_st_mt = [0, 0.635722779, 0.621341738, 0.869, 0.001]
width = 0.15
# plt.bar(np.array([1 - width, 2 - width, 3 - width, 4 - width, 5 - width, 6 - width, 7 - width,
# 8 - width / 2 * 3, 9 - width / 2 * 3]), grape_mt, alpha=0.9, width=width,
# hatch='/', edgecolor='black', label='GRAPE+MT')
# plt.bar(np.array([1, 2, 3, 4, 5, 6, 7, 8 - width / 2, 9 - width / 2]), tr_mt, alpha=0.9, width=width,
# hatch='\\', edgecolor='black', label='TR+MT')
# plt.bar(np.array([1 + width, 2 + width, 3 + width, 4 + width, 5 + width, 6 + width, 7 + width,
# 8 + width / 2, 9 + width / 2]), admm_mt, alpha=0.9, width=width,
# hatch='+', edgecolor='black', label='ADMM+MT')
# plt.bar(np.array([8 + width / 2 * 3, 9 + width / 2 * 3]), pgrape_mt, alpha=0.9, width=width,
# hatch='o', edgecolor='black', label='p-GRAPE+MT')
plt.bar(np.array([1 - width * 2, 2 - width * 2, 3 - width * 2, 4 - width, 5 - width, 6 - width, 7 - width,
8 - width / 2 * 5, 9 - width / 2 * 5]), grape_mt, alpha=0.9, width=width,
hatch='/', edgecolor='black', label='GRAPE+MT')
plt.bar(np.array([1 - width, 2 - width, 3 - width, 4, 5, 6, 7,
8 - width / 2 * 3, 9 - width / 2 * 3]), tr_mt, alpha=0.9, width=width,
hatch='\\', edgecolor='black', label='TR+MT')
plt.bar(np.array([1, 2, 3, 4 + width, 5 + width, 6 + width, 7 + width,
8 - width / 2, 9 - width / 2]), admm_mt, alpha=0.9, width=width,
hatch='+', edgecolor='black', label='ADMM+MT')
plt.bar(np.array([8 + width / 2, 9 + width / 2]), pgrape_mt, alpha=0.9, width=width,
hatch='o', edgecolor='black', label='p-GRAPE+MT')
plt.bar(np.array([1 + width, 2 + width, 3 + width, 8 + width / 2 * 3, 9 + width / 2 * 3]), tr_st_mt, alpha=0.9,
width=width, hatch='.', edgecolor='black', label='TR+STMT')
plt.bar(np.array([1 + width * 2, 2 + width * 2, 3 + width * 2, 8 + width / 2 * 5, 9 + width / 2 * 5]), admm_st_mt,
alpha=0.9, width=width, hatch='*', edgecolor='black', label='ADMM+STMT')
x_loc = plt.MultipleLocator(1)
ax = plt.gca()
ax.xaxis.set_major_locator(x_loc)
plt.xticks(instance, instance_name)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
# plt.show()
plt.savefig("../figure_paper/Minup_time_instances_new.png")
def draw_ms():
plt.figure(figsize=(15, 6), dpi=300)
instance = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
instance_name = ["Energy2", "Energy4", "Energy6", "CNOT5", "CNOT10", "CNOT15", "CNOT20", "CircuitH2", "CircuitLiH"]
grape_ms = [0.971, 0.854509937, 0.758544472, 0.830, 0.989, 0.675, 0.346, 0.230, 0.004]
pgrape_ms = [0.974, 0.713]
tr_ms = [0.96, 0.855868864, 0.747288281, 0.407, 0.981, 0.716, 0.303, 0.962, 0.776]
admm_ms = [0.972, 0.829879395, 0.763374401, 0.809, 0.994, 0.786, 0.381, 0.992, 0.443]
width = 0.15
plt.bar(np.array([1 - width, 2 - width, 3 - width, 4 - width, 5 - width, 6 - width, 7 - width,
8 - width / 2 * 3, 9 - width / 2 * 3]), grape_ms, alpha=0.9, width=width,
hatch='/', edgecolor='black', label='GRAPE+MS')
plt.bar(np.array([1, 2, 3, 4, 5, 6, 7, 8 - width / 2, 9 - width / 2]), tr_ms, alpha=0.9, width=width,
hatch='\\', edgecolor='black', label='TR+MS')
plt.bar(np.array([1 + width, 2 + width, 3 + width, 4 + width, 5 + width, 6 + width, 7 + width,
8 + width / 2, 9 + width / 2]), admm_ms, alpha=0.9, width=width,
hatch='+', edgecolor='black', label='ADMM+MS')
plt.bar(np.array([8 + width / 2 * 3, 9 + width / 2 * 3]), pgrape_ms, alpha=0.9, width=width,
hatch='o', edgecolor='black', label='p-GRAPE+MS')
x_loc = plt.MultipleLocator(1)
ax = plt.gca()
ax.xaxis.set_major_locator(x_loc)
plt.xticks(instance, instance_name)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
# plt.show()
plt.savefig("../figure_paper/Max_switching_all_instances.png")
def draw_grape_obj():
grape_sur = [0.999, 0.859436046, 0.788711064, 0.830, 0.999, 0.999, 0.999, 0.050, 0.209]
grape_mt = [0.841, 0.653728554, 0.316892473, 0.757, 0.842, 0.461, 0.218, 0.291, 0.003]
grape_ms = [0.971, 0.854509937, 0.758544472, 0.830, 0.989, 0.675, 0.346, 0.230, 0.004]
grape_sur_improve = [0.997, 0.858246985, 0.7870738, 0.824, 0.999, 0.999, 0.999544, 0.993, 0.551]
grape_mt_improve = [0.997, 0.834465772, 0.554418567, 0.805, 0.996, 0.994, 0.999, 0.992, 0.96]
grape_ms_improve = [0.999, 0.857057924, 0.775408293, 0.83, 0.999, 0.999, 0.9990531, 0.997, 0.994]
plt.figure(figsize=(8, 6), dpi=300)
instance = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
instance_name = ["Energy2", "Energy4", "Energy6", "CNOT5", "CNOT10", "CNOT15", "CNOT20", "CircuitH2", "CircuitLiH"]
plt.plot(instance, grape_sur, marker='o', linestyle='-', label='GRAPE+SUR')
plt.plot(instance, grape_mt, marker='^', linestyle='-', label='GRAPE+MT')
plt.plot(instance, grape_ms, marker='+', markersize='8', linestyle='-', label='GRAPE+MS')
plt.plot(instance, grape_sur_improve, marker='o', linestyle='--', label='GRAPE+SUR+ALB')
plt.plot(instance, grape_mt_improve, marker='^', linestyle='--', label='GRAPE+MT+ALB')
plt.plot(instance, grape_ms_improve, marker='+', markersize='8', linestyle='--', label='GRAPE+MS+ALB')
x_loc = plt.MultipleLocator(1)
ax = plt.gca()
ax.xaxis.set_major_locator(x_loc)
plt.xticks(instance, instance_name)
# plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.legend()
plt.tight_layout()
# plt.show()
plt.savefig("../figure_paper/grape_obj_all_instances.png")
def draw_grape_obj_instance():
grape = [0.999, 0.863597758, 0.791535344, 0.831, 1.000, 1.000, 1.000, 1.000, 1.000]
grape_sur = [0.999, 0.841476808, 0.7836432, 0.830, 0.999, 0.999, 0.999, 0.050, 0.209]
grape_mt = [0.841, 0.632912135, 0.3161038, 0.757, 0.842, 0.461, 0.218, 0.291, 0.003]
grape_ms = [0.971, 0.836338827, 0.7525139, 0.830, 0.989, 0.675, 0.346, 0.230, 0.004]
grape_sur_improve = [0.997, 0.840329552, 0.7816671, 0.824, 0.999, 0.999, 0.999544, 0.993, 0.551]
grape_mt_improve = [0.997, 0.801350332, 0.5713352, 0.805, 0.996, 0.994, 0.999, 0.992, 0.96]
grape_ms_improve = [0.999, 0.839518644, 0.7711258, 0.83, 0.999, 0.999, 0.9990531, 0.997, 0.994]
all_methods = [grape, grape_sur, grape_mt, grape_ms, grape_sur_improve, grape_mt_improve, grape_ms_improve]
plt.figure(figsize=(8, 6), dpi=300)
methods = np.array([1, 2, 3, 4, 5, 6, 7])
method_name = ["GRAPE", "GRAPE+SUR", "GRAPE+MT", "GRAPE+MS", "GRAPE+SUR+ALB", "GRAPE+MT+ALB", "GRAPE+MS+ALB"]
plt.plot(methods, [1 - method[0] for method in all_methods], marker='o', linestyle='-', label="Energy2")
plt.plot(methods, [1 - method[1] for method in all_methods], marker='^', linestyle='-', label="Energy4")
plt.plot(methods, [1 - method[2] for method in all_methods], marker='+', markersize='8', linestyle='-',
label="Energy6")
plt.plot(methods, [1 - method[3] for method in all_methods], marker='o', linestyle='--', label="CNOT5")
plt.plot(methods, [1 - method[4] for method in all_methods], marker='^', linestyle='--', label="CNOT10")
plt.plot(methods, [1 - method[5] for method in all_methods], marker='+', markersize='8', linestyle='--',
label="CNOT15")
plt.plot(methods, [1 - method[6] for method in all_methods], marker='s', linestyle='--', label='CNOT20')
plt.plot(methods, [1 - method[7] for method in all_methods], marker='o', linestyle='dotted', label="CircuitH2")
plt.plot(methods, [1 - method[8] for method in all_methods], marker='^', linestyle='dotted', label="CircuitLiH")
x_loc = plt.MultipleLocator(1)
ax = plt.gca()
ax.xaxis.set_major_locator(x_loc)
plt.xticks(methods, method_name, rotation=-15)
# plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.legend()
plt.tight_layout()
# plt.show()
plt.savefig("../figure_paper/grape_obj_per_instance_min.png")
def draw_grape_tv():
grape_sur_tv = [54, 26.8, 38.8, 16, 116, 266, 491, 112, 380]
grape_mt_tv = [4, 6, 6, 10, 22, 37, 53, 10, 48]
grape_ms_tv = [10, 10, 10, 16, 39, 38, 39, 26, 208]
grape_sur_improve_tv = [10, 17.2, 32.4, 9, 30, 262, 479, 16, 6]
grape_mt_improve_tv = [4, 6, 6, 9, 23, 33, 28, 4, 18]
grape_ms_improve_tv = [10, 9.2, 10, 16, 39, 39, 40, 18, 50]
plt.figure(figsize=(8, 6), dpi=300)
instance = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
instance_name = ["Energy2", "Energy4", "Energy6", "CNOT5", "CNOT10", "CNOT15", "CNOT20", "CircuitH2",
"CircuitLiH"]
plt.plot(instance, grape_sur_tv, marker='o', linestyle='-', label='GRAPE+SUR')
plt.plot(instance, grape_mt_tv, marker='^', linestyle='-', label='GRAPE+MT')
plt.plot(instance, grape_ms_tv, marker='+', markersize='8', linestyle='-', label='GRAPE+MS')
plt.plot(instance, grape_sur_improve_tv, marker='o', linestyle='--', label='GRAPE+SUR+ALB')
plt.plot(instance, grape_mt_improve_tv, marker='^', linestyle='--', label='GRAPE+MT+ALB')
plt.plot(instance, grape_ms_improve_tv, marker='+', markersize='8', linestyle='--', label='GRAPE+MS+ALB')
x_loc = plt.MultipleLocator(1)
ax = | |
#
# This file is part of PyFOPPL, an implementation of a First Order Probabilistic Programming Language in Python.
#
# License: MIT (see LICENSE.txt)
#
# 20. Feb 2018, <NAME>
# 20. Mar 2018, <NAME>
#
from ..fe_clojure import ppl_clojure_forms as clj
from ..ppl_ast import *
from .ppl_clojure_lexer import ClojureLexer
#######################################################################################################################
class ClojureParser(clj.Visitor):
__core_functions__ = {
'append',
'concat',
'conj',
'cons',
'filter',
'interleave',
'into',
'map',
'prepend',
'reduce',
}
def parse_alias(self, alias):
if clj.is_quoted(alias):
alias = alias.last
if clj.is_symbol(alias):
return alias.name, None
elif clj.is_form(alias) and len(alias) == 1 and clj.is_symbol(alias.head):
return alias.head.name, None
elif clj.is_symbol_vector(alias) and len(alias) == 3 and clj.is_symbol(alias[1], ':as'):
return alias.head.name, alias.last.name
return None, None
def parse_bindings(self, bindings):
if clj.is_vector(bindings):
if len(bindings) % 2 != 0:
raise TypeError("the bindings must contain an even number of elements: '{}'".format(bindings))
targets = bindings.items[0::2]
values = bindings.items[1::2]
names = [self.parse_target(target) for target in targets]
values = [value.visit(self) for value in values]
return names, values
else:
raise TypeError("the bindings must be a vector instead of '{}'".format(bindings))
def parse_body(self, body, *, use_return:bool=False):
body = [item.visit(self) for item in body]
if use_return:
if len(body) > 0:
body[-1] = AstReturn(body[-1])
else:
body.append(AstReturn(AstValue(None)))
if len(body) == 1:
return body[0]
else:
return makeBody(body)
def parse_function(self, parameters, body):
if clj.is_symbol_vector(parameters):
params = [p.name for p in parameters.items]
if len(params) >= 2 and params[-2] == '&':
vararg = params[-1]
params = params[:-2]
else:
vararg = None
if '&' in params:
raise SyntaxError("invalid parameters: '{}'".format(parameters))
else:
raise TypeError("invalid function parameters: '{}'".format(parameters))
body = self.parse_body(body, use_return=True)
return params, vararg, body
def parse_target(self, target):
if clj.is_symbol(target):
target = target.name
elif clj.is_symbol_vector(target):
target = tuple([t.name for t in target.items])
else:
raise TypeError("invalid target in assignment: '{}'".format(target))
return target
def visit_apply(self, function, *args):
function = function.visit(self)
args = [arg.visit(self) for arg in args]
return AstCall(function, args)
def visit_concat(self, *seqs):
seqs = [s.visit(self) for s in seqs]
if len(seqs) == 0:
return AstValue(None)
elif len(seqs) == 1:
return seqs[0]
else:
return AstCall(AstSymbol('clojure.core.concat'), seqs)
def visit_cond(self, *clauses):
if len(clauses) == 0:
return makeBody([])
if len(clauses) % 2 != 0:
raise SyntaxError("the number of clauses in 'cond' must be even")
clauses = list(reversed(clauses))
result = clauses[0].visit(self)
if not clj.is_symbol(clauses[1], ':else'):
result = makeIf(clauses[1].visit(self), result, None)
for test, body in zip(clauses[3::2], clauses[2::2]):
result = makeIf(test.visit(self), body.visit(self), result)
return result
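    # Illustrative desugaring (assumed input, not taken from the sources): the form
    #   (cond (< x 0) -1 (> x 0) 1 :else 0)
    # is folded from the right into makeIf((< x 0), -1, makeIf((> x 0), 1, 0)).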
def visit_conj(self, sequence, *elements):
sequence = sequence.visit(self)
elements = [e.visit(self) for e in elements]
result = sequence
for element in elements:
result = AstCall(AstSymbol('clojure.core.conj'), [result, element])
return result
def visit_cons(self, element, sequence):
element = element.visit(self)
sequence = sequence.visit(self)
return AstCall(AstSymbol('clojure.core.cons'), [element, sequence])
def visit_dec(self, number):
return AstBinary(number.visit(self), '-', AstValue(1))
def visit_def(self, target, source):
target = self.parse_target(target)
source = source.visit(self)
return makeDef(target, source, True)
def visit_defn(self, name, parameters, *body):
if clj.is_symbol(name):
name = name.name
else:
raise TypeError("function name expected instead of '{}'".format(name))
if clj.is_string(parameters) and len(body) > 1 and clj.is_symbol_vector(body[0]):
doc_string, parameters = parameters, body[0]
body = body[1:]
else:
doc_string = None
params, vararg, body = self.parse_function(parameters, body)
return makeDef(name, AstFunction(name, params, body, vararg=vararg, doc_string=doc_string), True)
def visit_do(self, body):
return self.parse_body(body)
def visit_doseq(self, bindings, *body):
targets, sources = self.parse_bindings(bindings)
result = self.parse_body(body)
for target, source in zip(reversed(targets), reversed(sources)):
result = makeFor(target, source, result)
return result
def visit_drop(self, count, sequence):
count = count.visit(self)
sequence = sequence.visit(self)
return AstSlice(sequence, count, None)
def visit_first(self, sequence):
sequence = sequence.visit(self)
return AstSubscript(sequence, AstValue(0))
def visit_fn(self, parameters, *body):
params, vararg, body = self.parse_function(parameters, body)
return AstFunction(None, params, body, vararg=vararg)
def visit_for(self, bindings, *body):
targets, sources = self.parse_bindings(bindings)
result = self.parse_body(body)
for target, source in zip(reversed(targets), reversed(sources)):
result = makeListFor(target, source, result)
return result
def visit_get(self, sequence, index, *defaults):
sequence = sequence.visit(self)
index = index.visit(self)
if len(defaults) == 0:
default = None
elif len(defaults) == 1:
default = defaults[0]
else:
raise TypeError("too many arguments for 'get' ({} given)".format(len(defaults) + 2))
if isinstance(sequence, AstSlice) and sequence.stop is None and is_integer(index) and default is None:
start = sequence.start_as_int
if start is not None:
return AstSubscript(sequence.base, AstValue(start + index.value))
return AstSubscript(sequence, index, default)
def visit_if(self, test, body, *else_body):
if len(else_body) > 1:
raise SyntaxError("too many arguments for 'if' ({} given)".format(len(else_body) + 2))
test = test.visit(self)
body = body.visit(self)
else_body = else_body[0].visit(self) if len(else_body) == 1 else None
return makeIf(test, body, else_body)
def visit_if_not(self, test, body, *else_body):
if len(else_body) == 1:
return self.visit_if(test, else_body[0], body)
elif len(else_body) == 0:
            return self.visit_if(clj.Form([clj.Symbol('not'), test]), body)
else:
raise SyntaxError("too many arguments for 'if-not' ({} given)".format(len(else_body)+2))
def visit_inc(self, number):
return AstBinary(number.visit(self), '+', AstValue(1))
def visit_last(self, sequence):
sequence = sequence.visit(self)
return AstSubscript(sequence, AstValue(-1))
def visit_let(self, bindings, *body):
targets, sources = self.parse_bindings(bindings)
return makeLet(targets, sources, self.parse_body(body))
def visit_nth(self, sequence, index):
sequence = self.visit(sequence)
index = self.visit(index)
if isinstance(sequence, AstSlice) and sequence.stop is None and is_integer(index):
start = sequence.start_as_int
if start is not None:
return AstSubscript(sequence.base, AstValue(start + index.value))
return AstSubscript(sequence, index)
def visit_observe(self, dist, value):
return AstObserve(dist.visit(self), value.visit(self))
def visit_put(self, sequence, index, value):
sequence = self.visit(sequence)
index = self.visit(index)
value = self.visit(value)
return AstCall(AstSymbol('list.put'), [sequence, index, value], is_builtin=True)
def visit_repeat(self, count, value):
value = value.visit(self)
if clj.is_integer(count):
n = count.value
return makeVector([value] * n)
else:
count = count.visit(self)
return AstBinary(AstVector([value]), '*', count)
def visit_repeatedly(self, count, function):
function = function.visit(self)
if clj.is_integer(count):
n = count.value
return makeVector([AstCall(function, [])] * n)
else:
count = count.visit(self)
return AstBinary(AstVector([AstCall(function, [])]), '*', count)
def visit_require(self, *args):
result = []
for arg in args:
name, as_name = self.parse_alias(arg)
if name is None:
raise SyntaxError("cannot import '{}'".format(arg))
result.append(AstImport(name, [], as_name))
if len(result) == 1:
return result[0]
else:
return makeBody(result)
def visit_rest(self, sequence):
sequence = sequence.visit(self)
if isinstance(sequence, AstSlice):
start = sequence.start_as_int
if start is not None:
return AstSlice(sequence.base, AstValue(start + 1), sequence.stop)
return AstSlice(sequence, AstValue(1), None)
def visit_sample(self, dist, *size):
if len(size) == 1:
size = self.visit(size[0])
else:
if len(size) > 0:
raise TypeError("sample() expected 1 or 2 arguments ({} given)".format(len(size)+1))
size = None
return AstSample(self.visit(dist), size=size)
def visit_second(self, sequence):
sequence = sequence.visit(self)
return AstSubscript(sequence, AstValue(1))
def visit_setv(self, target, source):
target = self.parse_target(target)
source = source.visit(self)
return AstDef(target, source)
def visit_subvec(self, sequence, start, *stop):
if len(stop) > 1:
raise TypeError("subvec() takes at most three arguments ({} given)".format(len(stop) + 2))
sequence = sequence.visit(self)
start = start.visit(self)
stop = stop[0].visit(self) if len(stop) > 0 else None
return AstSlice(sequence, start, stop)
def visit_sym_arrow(self, init_arg, *functions):
result = init_arg
for arg in functions:
if clj.is_form(arg):
result = clj.Form([arg.head, result] + arg.tail)
else:
result = clj.Form([arg, result])
return result.visit(self)
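    # Illustrative desugaring (assumed input): (-> x (f a) g) threads the running result in as
    # the first argument of each step, producing (g (f x a)) before the rewritten form is visited.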
def visit_sym_double_arrow(self, init_arg, *functions):
result = init_arg
for arg in functions:
if clj.is_form(arg):
result = clj.Form(arg.items + [result])
else:
result = clj.Form([arg, result])
return result.visit(self)
def visit_take(self, count, sequence):
count = count.visit(self)
sequence = sequence.visit(self)
return AstSlice(sequence, None, count)
def visit_use(self, *args):
result = []
for arg in args:
name, as_name = self.parse_alias(arg)
if name is None:
raise SyntaxError("cannot import '{}'".format(arg))
result.append(AstImport(name, ['*']))
if len(result) == 1:
return result[0]
else:
return makeBody(result)
def visit_vector(self, *items):
items = [item.visit(self) for item in items]
return makeVector(items)
def visit_while(self, test, *body):
test = test.visit(self)
body = self.parse_body(body)
return AstWhile(test, body)
###
def visit_form_form(self, node:clj.Form):
function = node.head.visit(self)
args = [item.visit(self) for item in node.tail]
if isinstance(function, AstSymbol):
n = function.name
if n in ['+', '-', 'not'] and len(args) == 1:
return AstUnary(n, args[0])
elif n in ['+', '-', '*', '/', 'and', 'or', 'bit-and', 'bit-or', 'bit-xor']:
if n == 'bit-and': n = '&'
if n == 'bit-or': n = '|'
if n == 'bit-xor': n = '^'
if len(args) == 0:
return AstValue(0 if n in ['+', '-'] else 1)
result = args[0]
for arg in args[1:]:
result = AstBinary(result, n, arg)
return result
elif n in ['<', '>', '<=', '>=', '=', '!=', '==', 'not=']:
if n == 'not=': n = '!='
if n == '=': n = '=='
if len(args) != 2:
raise TypeError("comparison requires exactly two arguments ({} given)".format(len(args)))
return AstCompare(args[0], n, args[1])
elif n == '.':
if len(args) < 2:
raise TypeError("attribute access requires at least two arguments ({} given)".format(len(args)))
result = args[0]
for arg in args[1:]:
if isinstance(arg, AstSymbol):
                        result = AstAttribute(result, arg.name)
"昌": 25592,
":0": 25593,
"ALE": 25594,
"AYA": 25595,
"besi": 25596,
"obraz": 25597,
"tsin": 25598,
"zinho": 25599,
"▁Hardware": 25600,
"▁TAN": 25601,
"▁dre": 25602,
"▁flac": 25603,
"▁pretty": 25604,
"▁while": 25605,
"-03-": 25606,
"IKO": 25607,
"IMU": 25608,
"Khmer": 25609,
"dash": 25610,
"have": 25611,
"zeli": 25612,
"▁Choose": 25613,
"▁INFO": 25614,
"▁Isra": 25615,
"▁Paradox": 25616,
"▁Secretary": 25617,
"▁each": 25618,
"▁fix": 25619,
"▁nie": 25620,
"▁toy": 25621,
"▁ts": 25622,
"bog": 25623,
"roj": 25624,
"ный": 25625,
"▁EXTRA": 25626,
"▁Helge": 25627,
"▁Institution": 25628,
"▁XXV": 25629,
"▁converti": 25630,
"野": 25631,
"equip": 25632,
"indo": 25633,
"mama": 25634,
"rings": 25635,
"▁Holz": 25636,
"▁Mwa": 25637,
"▁Nouveau": 25638,
"▁american": 25639,
"▁ea": 25640,
"▁friends": 25641,
"▁serpent": 25642,
"Jaa": 25643,
"LJ": 25644,
"cso": 25645,
"vd": 25646,
"▁ALL": 25647,
"▁Klan": 25648,
"▁Kristi": 25649,
"▁Occidental": 25650,
"▁PDK": 25651,
"▁Serial": 25652,
"▁intra": 25653,
"ała": 25654,
"dhu": 25655,
"eggia": 25656,
"imu": 25657,
"lado": 25658,
"tension": 25659,
"uwar": 25660,
"▁$10": 25661,
"▁Dib": 25662,
"▁GEL": 25663,
"▁Romantic": 25664,
"▁Scroll": 25665,
"▁dollars": 25666,
"▁effect": 25667,
"▁gro": 25668,
"▁lamp": 25669,
"▁recta": 25670,
"▁wireless": 25671,
"60,000": 25672,
"LET": 25673,
"engine": 25674,
"metra": 25675,
"rli": 25676,
"ruo": 25677,
"wend": 25678,
"új": 25679,
"▁Allgemein": 25680,
"▁Dü": 25681,
"▁assim": 25682,
"▁brit": 25683,
"▁rugi": 25684,
"郎": 25685,
"buku": 25686,
"kok": 25687,
"raden": 25688,
"resor": 25689,
"uu": 25690,
"▁Besi": 25691,
"▁Gogo": 25692,
"▁mann": 25693,
"▁wel": 25694,
"か": 25695,
"DRI": 25696,
"DRO": 25697,
"bici": 25698,
"dava": 25699,
"dels": 25700,
"demo": 25701,
"quot": 25702,
"▁Francia": 25703,
"▁RES": 25704,
"▁bp": 25705,
"▁express": 25706,
"▁member": 25707,
"▁multimedia": 25708,
"▁neem": 25709,
"▁sans": 25710,
"▁tak": 25711,
"▁using": 25712,
"▁vent": 25713,
"SIK": 25714,
"cios": 25715,
"sok": 25716,
"tely": 25717,
"ö": 25718,
"▁Quiz": 25719,
"▁rs": 25720,
"黎": 25721,
"glut": 25722,
"haan": 25723,
"heir": 25724,
"iding": 25725,
"ppan": 25726,
"▁Voor": 25727,
"▁audi": 25728,
"Standard": 25729,
"angarap": 25730,
"leis": 25731,
"melt": 25732,
"missa": 25733,
"▁Bodrum": 25734,
"▁Kadir": 25735,
"▁Patria": 25736,
"▁VER": 25737,
"阳": 25738,
"BUS": 25739,
"affi": 25740,
"aquí": 25741,
"cite": 25742,
"inom": 25743,
"▁GER": 25744,
"▁Staten": 25745,
"▁mando": 25746,
"▁moni": 25747,
"▁original": 25748,
"bent": 25749,
"pey": 25750,
"qab": 25751,
"tiu": 25752,
"▁Armen": 25753,
"▁Cathedral": 25754,
"▁Príncipe": 25755,
"▁Tura": 25756,
"▁button": 25757,
"▁obi": 25758,
"▁quin": 25759,
"▁С": 25760,
"懿": 25761,
"Allah": 25762,
"KOL": 25763,
"andin": 25764,
"plic": 25765,
"strand": 25766,
"strup": 25767,
"zom": 25768,
"▁Breakfast": 25769,
"▁Garda": 25770,
"▁Jörg": 25771,
"▁Kela": 25772,
"▁Kör": 25773,
"▁Maks": 25774,
"▁Sains": 25775,
"▁better": 25776,
"▁messenger": 25777,
"▁resto": 25778,
"▁sports": 25779,
"▁tort": 25780,
"▁women": 25781,
"景": 25782,
"祖": 25783,
"6.00": 25784,
"Text": 25785,
"tering": 25786,
"zg": 25787,
"▁Denmark": 25788,
"▁because": 25789,
"▁direct": 25790,
"Holland": 25791,
"legra": 25792,
"stava": 25793,
"tke": 25794,
"tā": 25795,
"ção": 25796,
"о": 25797,
"▁Videos": 25798,
"▁bv": 25799,
"▁libero": 25800,
"dhe": 25801,
"fili": 25802,
"ngga": 25803,
"rund": 25804,
"▁Aarhus": 25805,
"▁Haupt": 25806,
"▁Pilipinas": 25807,
"▁cala": 25808,
"▁det": 25809,
"Lie": 25810,
"barth": 25811,
"dust": 25812,
"mies": 25813,
"posa": 25814,
"ziya": 25815,
"üyü": 25816,
"▁Apr": 25817,
"▁Deutsch": 25818,
"▁Ember": 25819,
"英": 25820,
"...............": 25821,
"abbat": 25822,
"czka": 25823,
"kering": 25824,
"plac": 25825,
"prim": 25826,
"▁Kerk": 25827,
"▁Zlín": 25828,
"▁claus": 25829,
"▁karo": 25830,
"▁pal": 25831,
"▁pale": 25832,
"02.2017": 25833,
"jil": 25834,
"liq": 25835,
"menta": 25836,
"▁Insa": 25837,
"▁Ist": 25838,
"▁Presse": 25839,
"▁Veg": 25840,
"▁Wana": 25841,
"▁olymp": 25842,
"▁since": 25843,
"ROL": 25844,
"abil": 25845,
"atura": 25846,
"gged": 25847,
"gust": 25848,
"hik": 25849,
"illant": 25850,
"inform": 25851,
"jem": 25852,
"paka": 25853,
"tuba": 25854,
"voor": 25855,
"ša": 25856,
"▁GRA": 25857,
"▁Saat": 25858,
"▁Sweden": 25859,
"▁bed": 25860,
"▁qi": 25861,
"▁research": 25862,
"▁Çe": 25863,
".......": 25864,
"ELA": 25865,
"RAR": 25866,
"ixe": 25867,
"lj": 25868,
"mem": 25869,
"▁Bookmark": 25870,
"▁barem": 25871,
"▁dell": 25872,
"▁hus": 25873,
"▁pap": 25874,
"ANDA": 25875,
"Java": 25876,
"ayn": 25877,
"format": 25878,
"gym": 25879,
"itza": 25880,
"verk": 25881,
"▁(40)": 25882,
"▁(50)": 25883,
"▁Alman": 25884,
"▁Batang": 25885,
"▁Besar": 25886,
"▁Fina": 25887,
"▁Itali": 25888,
"▁Sebastián": 25889,
"▁hill": 25890,
"▁lost": 25891,
"▁reng": 25892,
"[19]": 25893,
"[30]": 25894,
"gulo": 25895,
"mla": 25896,
"°": 25897,
"ós": 25898,
"▁ADA": 25899,
"▁Ime": 25900,
"▁Individual": 25901,
"▁Nationale": 25902,
"▁Vse": 25903,
"▁capital": 25904,
"▁frontal": 25905,
"▁sat": 25906,
"皇帝": 25907,
"YAL": 25908,
"^": 25909,
"leven": 25910,
"reb": 25911,
"ʻ": 25912,
"▁Claro": 25913,
"▁Huis": 25914,
"▁Nej": 25915,
"▁Streaming": 25916,
"▁Uzun": 25917,
"▁canto": 25918,
"▁rei": 25919,
"▁twist": 25920,
"ABO": 25921,
"EVER": 25922,
"VET": 25923,
"balo": 25924,
"eten": 25925,
"prove": 25926,
"▁Dire": 25927,
"▁Kral": 25928,
"▁Perdana": 25929,
"▁Sergej": 25930,
"▁Syd": 25931,
"▁balans": 25932,
"▁options": 25933,
"▁pena": 25934,
"年": 25935,
"abot": 25936,
"halter": 25937,
"imper": 25938,
"lique": 25939,
"ács": 25940,
"▁Igual": 25941,
"▁Yara": 25942,
"▁anna": 25943,
"▁bright": 25944,
"▁give": 25945,
"▁labo": 25946,
"▁law": 25947,
"八": 25948,
"章": 25949,
"BIZ": 25950,
"IER": 25951,
"frac": 25952,
"hdi": 25953,
"iej": 25954,
"isht": 25955,
"koz": 25956,
"torii": 25957,
"what": 25958,
"▁Arad": 25959,
"▁Dema": 25960,
"▁Marzo": 25961,
"▁levi": 25962,
"▁mana": 25963,
"-06-": 25964,
"SPEC": 25965,
"Vlaanderen": 25966,
"natural": 25967,
"riet": 25968,
"▁Customer": 25969,
"▁Domina": 25970,
"▁FRE": 25971,
"▁Tev": 25972,
"▁edu": 25973,
"▁santa": 25974,
"古": 25975,
"Software": 25976,
"agrada": 25977,
"bhra": 25978,
"dega": 25979,
"fed": 25980,
"▁11.00": 25981,
"▁Iga": 25982,
"▁STA": 25983,
"▁ach": 25984,
"▁ago": 25985,
"▁cl": 25986,
"▁lives": 25987,
"▁rustic": 25988,
"▁sich": 25989,
"宣": 25990,
"寺": 25991,
"ISH": 25992,
"project": 25993,
"rodo": 25994,
"ópolis": 25995,
"▁Tug": 25996,
"▁fac": 25997,
"▁special": 25998,
"8.00": 25999,
"ndum": 26000,
"С": 26001,
"▁Keli": 26002,
"▁Klip": 26003,
"▁Maska": 26004,
"▁Medio": 26005,
"▁Türk": 26006,
"300,000": 26007,
"REL": 26008,
"aram": 26009,
"mă": 26010,
"ości": 26011,
"person": 26012,
"thir": 26013,
"tla": 26014,
"▁Marke": 26015,
"▁Punt": 26016,
"▁phr": 26017,
"さ": 26018,
"Ing": 26019,
"RTI": 26020,
"ingar": 26021,
"kking": 26022,
"ucht": 26023,
"vaka": 26024,
"▁Danes": 26025,
"▁Esti": 26026,
"▁GDPR": 26027,
"▁LEO": 26028,
"▁Myśl": 26029,
"▁Osteo": 26030,
"▁Pedr": 26031,
"▁cant": 26032,
"▁ret": 26033,
"容": 26034,
"Raja": 26035,
"TAY": 26036,
"pale": 26037,
"unti": 26038,
"▁Hj": 26039,
"▁Mittel": 26040,
"▁Oso": 26041,
"▁Schloss": 26042,
"▁hole": 26043,
"▁six": 26044,
"▁А": 26045,
"LEN": 26046,
"fila": 26047,
"isten": 26048,
"raca": 26049,
"rasi": 26050,
"rhythm": 26051,
"▁1394": 26052,
"▁Eskişehir": 26053,
"▁Ghosh": 26054,
"▁Innsbruck": 26055,
"▁Patrik": 26056,
"▁cuba": 26057,
"▁hic": 26058,
"▁indo": 26059,
"▁une": 26060,
"慈": 26061,
"有": 26062,
"DRA": 26063,
"alter": 26064,
"icis": 26065,
"▁Schreib": 26066,
"▁Toda": 26067,
"▁latte": 26068,
"▁tinc": 26069,
"UO": 26070,
"archae": 26071,
"felder": 26072,
"jek": 26073,
"meng": 26074,
"product": 26075,
"ulle": 26076,
"unar": 26077,
"veti": 26078,
"şan": 26079,
"▁Dewi": 26080,
"▁JU": 26081,
"▁Kranj": 26082,
"▁Norway": 26083,
"▁Slova": 26084,
"▁Stati": 26085,
"▁Suárez": 26086,
"▁hed": 26087,
"▁mell": 26088,
"▁orbit": 26089,
"▁round": 26090,
"▁stan": 26091,
"▁Ž": 26092,
"に": 26093,
"FORM": 26094,
"RDI": 26095,
"aint": 26096,
"eremo": 26097,
"randi": 26098,
"sound": 26099,
"uros": 26100,
"▁Aller": 26101,
"▁Mongolia": 26102,
"▁Patent": 26103,
"▁Quand": 26104,
"興": 26105,
"gji": 26106,
"labora": 26107,
"éz": 26108,
"▁BAB": 26109,
"▁Hiri": 26110,
"▁Koha": 26111,
"▁Nicolae": 26112,
"▁Puna": 26113,
"▁Quid": 26114,
"▁Simula": 26115,
"▁Traian": 26116,
"▁could": 26117,
"▁equi": 26118,
"▁fille": 26119,
"▁production": 26120,
"縣": 26121,
"ETE": 26122,
"MIR": 26123,
"ONI": 26124,
"SZ": 26125,
"arsa": 26126,
"fjall": 26127,
"iones": 26128,
"riat": 26129,
"route": 26130,
"straße": 26131,
"ukan": 26132,
"”": 26133,
"▁1397": 26134,
"▁Brak": 26135,
"▁Kassim": 26136,
"▁Litera": 26137,
"▁Specialist": 26138,
"▁Tirol": 26139,
"▁bale": 26140,
"▁incur": 26141,
"▁obes": 26142,
"▁salam": 26143,
"biy": 26144,
"ikka": 26145,
"kimi": 26146,
"koma": 26147,
"mping": 26148,
"âr": 26149,
"▁Alarm": 26150,
"▁INTER": 26151,
"▁Montserrat": 26152,
"▁Skal": 26153,
"~": 26154,
"ECI": 26155,
"bd": 26156,
"stane": 26157,
"▁Bayram": 26158,
"▁Goiás": 26159,
"▁Paşa": 26160,
"▁ia": 26161,
"▁wild": 26162,
"Bud": 26163,
"csa": 26164,
"gabal": 26165,
"isana": 26166,
"nière": 26167,
"rky": 26168,
"treb": 26169,
"▁Edirne": 26170,
"▁Energia": 26171,
"▁Laga": 26172,
"▁Proti": 26173,
"▁Televi": 26174,
"▁Viena": 26175,
"▁bara": 26176,
"▁protected": 26177,
"▁suis": 26178,
"▁yum": 26179,
"TRE": 26180,
"efaso": 26181,
"ientes": 26182,
"tadi": 26183,
"tention": 26184,
"unum": 26185,
"ович": 26186,
"▁CER": 26187,
"▁Kalli": 26188,
"▁VAL": 26189,
"▁Voices": 26190,
"▁calor": 26191,
"▁lib": 26192,
"▁lips": 26193,
"弘": 26194,
"OOM": 26195,
"apan": 26196,
"dijk": 26197,
"handel": 26198,
"huizen": 26199,
"rangan": 26200,
"Ẽ": 26201,
"▁GUI": 26202,
"▁amor": 26203,
"▁bonus": 26204,
"▁contra": 26205,
"▁diff": 26206,
"▁diffusa": 26207,
"▁earth": 26208,
"▁euros": 26209,
"▁method": 26210,
"Report": 26211,
"flag": 26212,
"mely": 26213,
"modi": 26214,
"onge": 26215,
"zis": 26216,
"▁Institut": 26217,
"▁Konst": 26218,
"▁Pembe": 26219,
"▁Provo": 26220,
"▁Puzzle": 26221,
"▁gali": 26222,
"▁pont": 26223,
"▁print": 26224,
"▁private": 26225,
"▁these": 26226,
"儀": 26227,
"03.2018": 26228,
"ocht": 26229,
"security": 26230,
"▁Ezek": 26231,
| |
# Adapted from https://github.com/sergionr2/RacingRobot
# Author: <NAME>
import argparse
import os
import time
import cv2
from threading import Event, Thread
import numpy as np
import pygame
from pygame.locals import *
from stable_baselines.bench import Monitor
from stable_baselines.common.vec_env import VecFrameStack, VecNormalize, DummyVecEnv
from config import MIN_STEERING, MAX_STEERING, MIN_THROTTLE, MAX_THROTTLE, \
LEVEL, N_COMMAND_HISTORY, TEST_FRAME_SKIP, ENV_ID, FRAME_SKIP, \
SHOW_IMAGES_TELEOP, REWARD_CRASH, CRASH_SPEED_WEIGHT
from donkey_gym.envs.vae_env import DonkeyVAEEnv
from utils.utils import ALGOS, get_latest_run_id, load_vae
from .recorder import Recorder
UP = (MAX_THROTTLE, 0)
LEFT = (0, 1)
RIGHT = (0, -1)
DOWN = (-MAX_THROTTLE * 1.0, 0)
STOP = (0, 0)
KEY_CODE_SPACE = 32
MAX_TURN = 1
# Smoothing constants
STEP_THROTTLE = 0.3
STEP_TURN = 0.4
TELEOP_RATE = 1 / 60 # 60 fps
GREEN = (72, 205, 40)
RED = (205, 39, 46)
GREY = (187, 179, 179)
BLACK = (36, 36, 36)
WHITE = (230, 230, 230)
ORANGE = (200, 110, 0)
moveBindingsGame = {
K_UP: UP,
K_LEFT: LEFT,
K_RIGHT: RIGHT,
K_DOWN: DOWN
}
pygame.font.init()
FONT = pygame.font.SysFont('Open Sans', 25)
SMALL_FONT = pygame.font.SysFont('Open Sans', 20)
KEY_MIN_DELAY = 0.4
MAX_N_OUT_OF_BOUND = FRAME_SKIP
def control(x, theta, control_throttle, control_steering):
"""
Smooth control: rate-limit the change of throttle and steering
(at most STEP_THROTTLE / STEP_TURN per call).
:param x: (float) target throttle
:param theta: (float) target steering direction (scaled by MAX_TURN)
:param control_throttle: (float) current smoothed throttle
:param control_steering: (float) current smoothed steering
:return: (float, float) updated (control_throttle, control_steering)
"""
target_throttle = x
target_steering = MAX_TURN * theta
if target_throttle > control_throttle:
control_throttle = min(target_throttle, control_throttle + STEP_THROTTLE)
elif target_throttle < control_throttle:
control_throttle = max(target_throttle, control_throttle - STEP_THROTTLE)
else:
control_throttle = target_throttle
if target_steering > control_steering:
control_steering = min(target_steering, control_steering + STEP_TURN)
elif target_steering < control_steering:
control_steering = max(target_steering, control_steering - STEP_TURN)
else:
control_steering = target_steering
return control_throttle, control_steering
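# Illustrative sketch (not part of the original script): starting from rest, a repeated
# full-throttle command is approached in increments of at most STEP_THROTTLE per call,
# e.g. control(MAX_THROTTLE, 0, 0.0, 0.0) -> (min(MAX_THROTTLE, STEP_THROTTLE), 0.0),
# and steering is rate-limited by STEP_TURN in the same way.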
class TeleopEnv(object):
def __init__(self, env, model=None, is_recording=False,
is_training=False, deterministic=True):
super(TeleopEnv, self).__init__()
self.env = env
self.model = model
self.need_reset = False
self.is_manual = True
self.is_recording = is_recording
self.is_training = is_training
# For keyboard trigger
self.fill_buffer = False
# For display
self.is_filling = False
self.current_obs = None
self.exit_event = Event()
self.done_event = Event()
self.ready_event = Event()
# For testing
self.deterministic = deterministic
self.window = None
self.process = None
self.action = None
self.observation_space = env.observation_space
self.action_space = env.action_space
self.donkey_env = None
self.n_out_of_bound = 0
self.current_image = None
self.image_surface = None
self.decoded_surface = None
self.start_process()
def start_process(self):
"""Start preprocessing process"""
self.process = Thread(target=self.main_loop)
# Make it a daemon thread, so it is terminated together with
# the main process
self.process.daemon = True
self.process.start()
def step(self, action):
self.action = action
self.current_obs, reward, done, info = self.env.step(action)
# Overwrite done
if self.done_event.is_set():
done = False
# Negative reward for several steps
if self.n_out_of_bound < MAX_N_OUT_OF_BOUND:
self.n_out_of_bound += 1
else:
done = True
# penalize the agent for getting off the road fast
norm_throttle = (action[1] - MIN_THROTTLE) / (MAX_THROTTLE - MIN_THROTTLE)
reward = REWARD_CRASH - CRASH_SPEED_WEIGHT * norm_throttle
else:
done = False
return self.current_obs, reward, done, info
def render(self, mode='human'):
return self.env.render(mode)
def reset(self):
self.n_out_of_bound = 0
# Disable reset after init
if self.need_reset:
self.need_reset = False
return self.env.reset()
else:
# Zero speed, neutral angle
self.donkey_env.controller.take_action([0, 0])
return self.current_obs
def wait_for_teleop_reset(self):
self.ready_event.wait()
return self.reset()
def exit(self):
self.env.reset()
self.donkey_env.exit_scene()
def wait(self):
self.process.join()
def main_loop(self):
# Pygame requires a window
pygame.init()
self.window = pygame.display.set_mode((800, 500), RESIZABLE)
end = False
control_throttle, control_steering = 0, 0
action = [control_steering, control_throttle]
self.update_screen(action)
donkey_env = self.env
# Unwrap env
if isinstance(donkey_env, Recorder):
donkey_env = donkey_env.env
while isinstance(donkey_env, VecNormalize) or isinstance(donkey_env, VecFrameStack):
donkey_env = donkey_env.venv
if isinstance(donkey_env, DummyVecEnv):
donkey_env = donkey_env.envs[0]
if isinstance(donkey_env, Monitor):
donkey_env = donkey_env.env
assert isinstance(donkey_env, DonkeyVAEEnv), print(donkey_env)
self.donkey_env = donkey_env
last_time_pressed = {'space': 0, 'm': 0, 't': 0, 'b': 0, 'o': 0}
self.current_obs = self.reset()
if self.model is not None:
# Prevent error (uninitialized value)
self.model.n_updates = 0
while not end:
x, theta = 0, 0
keys = pygame.key.get_pressed()
for keycode in moveBindingsGame.keys():
if keys[keycode]:
x_tmp, th_tmp = moveBindingsGame[keycode]
x += x_tmp
theta += th_tmp
if keys[K_SPACE] and (time.time() - last_time_pressed['space']) > KEY_MIN_DELAY:
self.is_recording = not self.is_recording
if isinstance(self.env, Recorder):
self.env.toggle_recording()
# avoid multiple key press
last_time_pressed['space'] = time.time()
if keys[K_m] and (time.time() - last_time_pressed['m']) > KEY_MIN_DELAY:
self.is_manual = not self.is_manual
# avoid multiple key press
last_time_pressed['m'] = time.time()
if self.is_training:
if self.is_manual:
# Stop training
self.ready_event.clear()
self.done_event.set()
else:
# Start training
self.done_event.clear()
self.ready_event.set()
if keys[K_t] and (time.time() - last_time_pressed['t']) > KEY_MIN_DELAY:
self.is_training = not self.is_training
# avoid multiple key press
last_time_pressed['t'] = time.time()
if keys[K_b] and (time.time() - last_time_pressed['b']) > KEY_MIN_DELAY:
self.fill_buffer = not self.fill_buffer
# avoid multiple key press
last_time_pressed['b'] = time.time()
if keys[K_r]:
self.current_obs = self.env.reset()
if keys[K_o]:
if (self.is_manual
and self.model is not None
and hasattr(self.model, 'optimize')
and (time.time() - last_time_pressed['o']) > KEY_MIN_DELAY):
print("Optimizing")
self.model.optimize(len(self.model.replay_buffer), None, self.model.learning_rate(1))
last_time_pressed['o'] = time.time()
if keys[K_l]:
self.env.reset()
self.donkey_env.exit_scene()
self.need_reset = True
# Smooth control for teleoperation
control_throttle, control_steering = control(x, theta, control_throttle, control_steering)
# Send Orders
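# In manual mode, map control_steering from [-MAX_TURN, MAX_TURN] onto the
# [MAX_STEERING, MIN_STEERING] range by linear interpolation
# (t = 0 -> MAX_STEERING, t = 1 -> MIN_STEERING).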
if self.model is None or self.is_manual:
t = (control_steering + MAX_TURN) / (2 * MAX_TURN)
steering_order = MIN_STEERING * t + MAX_STEERING * (1 - t)
self.action = [steering_order, control_throttle]
elif self.model is not None and not self.is_training:
self.action, _ = self.model.predict(self.current_obs, deterministic=self.deterministic)
self.is_filling = False
if not (self.is_training and not self.is_manual):
if self.is_manual and not self.fill_buffer:
donkey_env.controller.take_action(self.action)
self.current_obs, reward, done, info = donkey_env.observe()
self.current_obs, _, _, _ = donkey_env.postprocessing_step(self.action, self.current_obs,
reward, done, info)
else:
if self.fill_buffer:
old_obs = self.current_obs
self.current_obs, reward, done, _ = self.env.step(self.action)
# Store the transition in the replay buffer
if self.fill_buffer and hasattr(self.model, 'replay_buffer'):
assert old_obs is not None
if old_obs.shape[1] == self.current_obs.shape[1]:
self.is_filling = True
self.model.replay_buffer.add(old_obs, self.action, reward, self.current_obs, float(done))
if isinstance(self.env, Recorder):
self.env.save_image()
self.current_image = self.env.render(mode='rgb_array')
self.update_screen(self.action)
for event in pygame.event.get():
if event.type == QUIT or event.type == KEYDOWN and event.key in [K_ESCAPE, K_q]:
end = True
pygame.display.flip()
# Limit FPS
pygame.time.Clock().tick(1 / TELEOP_RATE)
self.ready_event.set()
self.exit_event.set()
def write_text(self, text, x, y, font, color=GREY):
text = str(text)
text = font.render(text, True, color)
self.window.blit(text, (x, y))
def clear(self):
self.window.fill((0, 0, 0))
def update_screen(self, action):
self.clear()
steering, throttle = action
self.write_text('Throttle: {:.2f}, Steering: {:.2f}'.format(throttle, steering), 20, 0, FONT, WHITE)
help_str = 'Use arrow keys to move, q or ESCAPE to exit.'
self.write_text(help_str, 20, 50, SMALL_FONT)
help_2 = 'space key: toggle recording -- m: change mode -- r: reset -- l: reset track'
self.write_text(help_2, 20, 100, SMALL_FONT)
if isinstance(self.env, Recorder):
self.write_text('Recording Status:', 20, 150, SMALL_FONT, WHITE)
if self.is_recording:
text, text_color = 'RECORDING', RED
else:
text, text_color = 'NOT RECORDING', GREEN
self.write_text(text, 200, 150, SMALL_FONT, text_color)
self.write_text('Mode:', 20, 200, SMALL_FONT, WHITE)
if self.is_manual:
text, text_color = 'MANUAL', GREEN
else:
text, text_color = 'AUTONOMOUS', ORANGE
self.write_text(text, 200, 200, SMALL_FONT, text_color)
self.write_text('Training Status:', 20, 250, SMALL_FONT, WHITE)
if self.is_training:
text, text_color = 'TRAINING', RED
else:
text, text_color = 'TESTING', GREEN
self.write_text(text, 200, 250, SMALL_FONT, text_color)
if self.is_filling:
text, text_color = 'FILLING THE BUFFER', RED
else:
text, text_color = '', GREEN
self.write_text(text, 200, 300, SMALL_FONT, text_color)
if self.current_image is not None and SHOW_IMAGES_TELEOP:
current_image = np.swapaxes(self.current_image, 0, 1)
if self.image_surface is None:
self.image_surface = pygame.pixelcopy.make_surface(current_image)
pygame.pixelcopy.array_to_surface(self.image_surface, current_image)
self.window.blit(self.image_surface, (20, 350))
if (self.donkey_env is not None
and self.donkey_env.vae is not None
and self.current_obs is not None
and SHOW_IMAGES_TELEOP):
vae_dim = self.donkey_env.vae.z_size
encoded = self.current_obs[:, :vae_dim]
reconstructed_image = self.donkey_env.vae.decode(encoded)[0]
# Convert BGR to RGB
# reconstructed_image = reconstructed_image[:, :, ::-1]
reconstructed_image = np.swapaxes(reconstructed_image, 0, 1)
if self.decoded_surface is None:
self.decoded_surface = pygame.pixelcopy.make_surface(reconstructed_image)
pygame.pixelcopy.array_to_surface(self.decoded_surface, reconstructed_image)
self.window.blit(self.decoded_surface, (220, 350))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--folder', help='Log folder', type=str, default='logs')
parser.add_argument('--record-folder', help='Record folder, where images are saved', type=str,
default='logs/recorded_data/')
parser.add_argument('--algo', help='RL Algorithm', default='',
type=str, required=False, choices=list(ALGOS.keys()))
parser.add_argument('-n', '--n-timesteps', help='number of timesteps', default=1000,
type=int)
parser.add_argument('--exp-id', help='Experiment ID (-1: no exp folder, 0: latest)', default=0,
type=int)
parser.add_argument('-vae', '--vae-path', help='Path to saved VAE', type=str, default='')
args = parser.parse_args()
algo = args.algo
folder = args.folder
model = None
vae = None
os.environ['DONKEY_NAME'] = "my_robot1234"
os.environ['DONKEY_MQTT_BROKER'] = "192.168.0.24"
if algo != '':
if args.exp_id == 0:
args.exp_id = get_latest_run_id(os.path.join(folder, algo), ENV_ID)
print('Loading latest experiment, id={}'.format(args.exp_id))
# Sanity checks
if args.exp_id > 0:
log_path = os.path.join(folder, algo, '{}_{}'.format(ENV_ID, args.exp_id))
else:
log_path = os.path.join(folder, algo)
model_path = "{}/{}.pkl".format(log_path, ENV_ID)
assert os.path.isdir(log_path), "The {} folder was not found".format(log_path)
assert os.path.isfile(model_path), "No model found for {} on {}, path: {}".format(algo, ENV_ID, model_path)
model = ALGOS[algo].load(model_path)
if args.vae_path != '':
print("Loading VAE ...")
vae = load_vae(args.vae_path)
if | |
:
"""""",
}, # column
"multicastVlanStatusTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.26.3",
"status" : "current",
"description" :
"""""",
}, # table
"multicastVlanStatusEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.26.3.1",
"status" : "current",
"linkage" : [
"multicastVlanStatusVlanID",
],
"description" :
"""An entry in multicastVlanStatusTable.""",
}, # row
"multicastVlanStatusVlanID" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.2172.16.31.10",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"multicastVlanStatusType" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.26.3.1.2",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"dynamic" : {
"nodetype" : "namednumber",
"number" : "1"
},
"mvr" : {
"nodetype" : "namednumber",
"number" : "2"
},
"static" : {
"nodetype" : "namednumber",
"number" : "3"
},
},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"multicastVlanQueryPort" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.26.3.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"Q-BRIDGE-MIB", "name" : "PortList"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"igmpFilteringProfileSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.27",
}, # node
"igmpFilteringMaxNumberOfProfile" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.27.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"igmpFilteringProfileTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.27.2",
"status" : "current",
"description" :
"""""",
}, # table
"igmpFilteringProfileEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.27.2.1",
"create" : "true",
"status" : "current",
"linkage" : [
"igmpFilteringProfileName",
"igmpFilteringProfileStartAddress",
"igmpFilteringProfileEndAddress",
],
"description" :
"""An entry in igmpFilteringProfileTable.""",
}, # row
"igmpFilteringProfileName" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.27.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"igmpFilteringProfileStartAddress" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.27.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"igmpFilteringProfileEndAddress" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.27.2.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"igmpFilteringProfileRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.27.2.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mvrSetup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.28",
}, # node
"maxNumberOfMVR" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.28.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"mvrTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.2",
"status" : "current",
"description" :
"""""",
}, # table
"mvrEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.2.1",
"create" : "true",
"status" : "current",
"linkage" : [
"mvrVlanID",
],
"description" :
"""An entry in mvrTable.""",
}, # row
"mvrVlanID" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.2.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""1..4094""",
}, # column
"mvrName" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.28.2.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mvrMode" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.28.2.1.3",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"dynamic" : {
"nodetype" : "namednumber",
"number" : "0"
},
"compatible" : {
"nodetype" : "namednumber",
"number" : "1"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mvrRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.2.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mvr8021pPriority" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.2.1.5",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readwrite",
"description" :
"""Set the 802.1p priority of control messages within MVR (0~7)""",
}, # column
"mvrPortTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.3",
"status" : "current",
"description" :
"""""",
}, # table
"mvrPortEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.3.1",
"status" : "current",
"linkage" : [
"mvrVlanID",
"dot1dBasePort",
],
"description" :
"""An entry in mvrPortTable.""",
}, # row
"mvrPortRole" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.3.1.1",
"status" : "current",
"syntax" : {
"type" : {
"basetype" : "Enumeration",
"none" : {
"nodetype" : "namednumber",
"number" : "1"
},
"source_port" : {
"nodetype" : "namednumber",
"number" : "2"
},
"receiver_port" : {
"nodetype" : "namednumber",
"number" : "3"
},
},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mvrPortTagging" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.3.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"maxNumberOfMvrGroup" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"", "name" : "Integer32"},
},
"access" : "readonly",
"description" :
"""""",
}, # scalar
"mvrGroupTable" : {
"nodetype" : "table",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.5",
"status" : "current",
"description" :
"""""",
}, # table
"mvrGroupEntry" : {
"nodetype" : "row",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.28.5.1",
"create" : "true",
"status" : "current",
"linkage" : [
"mvrVlanID",
"mvrGroupName",
],
"description" :
"""An entry in mvrGroupTable.""",
}, # row
"mvrGroupName" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.28.5.1.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"RFC1213-MIB", "name" : "DisplayString"},
},
"access" : "readonly",
"description" :
"""""",
}, # column
"mvrGroupStartAddress" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.28.5.1.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mvrGroupEndAddress" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.28.5.1.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-SMI", "name" : "IpAddress"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"mvrGroupRowStatus" : {
"nodetype" : "column",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.1.5.8.20.28.5.1.4",
"status" : "current",
"syntax" : {
"type" : { "module" :"SNMPv2-TC", "name" : "RowStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # column
"layer3Setup" : {
"nodetype" : "node",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.29",
}, # node
"routerRipState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.29.1",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"routerIgmpState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.29.2",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"routerDvmrpState" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.29.3",
"status" : "current",
"syntax" : {
"type" : { "module" :"P-BRIDGE-MIB", "name" : "EnabledStatus"},
},
"access" : "readwrite",
"description" :
"""""",
}, # scalar
"routerDvmrpThreshold" : {
"nodetype" : "scalar",
"moduleName" : "ZYXEL-GS4012F-MIB",
"oid" : "1.3.6.1.4.1.890.172.16.31.10.29.4",
"status" : "current",
"syntax" : | |
"""
===========================================================================
Model the static and dynamic behaviour of an orienteering compass in the
Earth magnetic field.
All parameters are in SI units, angles in radians.
===========================================================================
"""
# 1/ Imports
import math
import copy
from scipy.integrate import solve_ivp
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import xlrd
# 2/ Constants
MAG_PER = 1.25664e-06 # Vacuum magnetic permeability (mu_0), T.m/A
G = 9.81 # gravity acceleration
# 3/ Classes
class Compass:
"""
Representing an orienteering compass
Default is for GEONAUTE R500 compass
Attributes:
name: name of the compass
needle_length: metters
needle_width: metters
needle_thickness: metters
needle_disk_density: kg/m^3 default is PEHD 1200
disk_radius: friction disk radius, metters
disk_thickness: metters
mag_rem: magnet remanent magnetic field, Tesla
V: magnet volume, m^3
m: magnet mass, kg
magnet_mom_z: inertial moment of the magnet related to its z axis, kg.m^2
x: x offset of magnet mass center, metters
rho: damping liquid density (volumic mass), kg/m^3
viscosity: liquid dynamic viscosity, kg/m/s
Properties:
mom_z: inertial moment of the rotating assembly related to z axis
visc_coef: viscous friction coefficient between the liquid and the disk +
needle
"""
def __init__(
self,
name="R500",
needle_length=0.032,
needle_width=0.008,
needle_thickness=0.00025,
needle_disk_density=1200,
disk_radius=0.0115,
disk_thickness=0.0001,
mag_rem=1.3,
V=6e-8,
m=0.00045,
magnet_mom_z=5.1e-09,
x=-0.0005,
rho=700,
viscosity=1.08,
z_h=0.004,
z_b=0.004,
):
self.name = name
self.needle_length = needle_length
self.needle_width = needle_width
self.needle_thickness = needle_thickness
self.needle_disk_density = needle_disk_density
self.disk_radius = disk_radius
self.disk_thickness = disk_thickness
self.mag_rem = mag_rem
self.V = V
self.m = m
self.magnet_mom_z = magnet_mom_z
self.x = x
self.rho = rho
self.viscosity = viscosity
self.z_h = z_h
self.z_b = z_b
@property
def mom_z(self):
"""
Moment of inertia of the needle assembly about the z axis.
"""
disk_mom = (
math.pi
* math.pow(self.disk_radius, 4)
* self.disk_thickness
* self.needle_disk_density
/ 2
)
needle_mom = (
self.needle_length
* self.needle_width
* self.needle_thickness
* self.needle_disk_density
* (math.pow(self.needle_length, 2) + math.pow(self.needle_width, 2))
/ 12
)
mom = (
disk_mom
+ needle_mom
+ self.magnet_mom_z
+ self.m * math.pow(self.x, 2)
)
# mom = 7.8e-9
return mom
@property
def visc_coef(self):
"""
Viscous friction coefficient between the damping liquid and the disk + needle.
"""
coef = (
self.viscosity
* (1 / self.z_h + 1 / self.z_b)
* (
math.pi * math.pow(self.disk_radius, 4) / 2
+ (
math.pow(self.needle_length, 3) / 8
- math.pow(self.disk_radius, 3)
)
* self.needle_width
/ 3
+ (self.needle_length / 2 - self.disk_radius)
* math.pow(self.needle_width, 3)
/ 12
)
)
# coef = 8e-8
return coef
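# Minimal usage sketch (illustration only, not part of the original module):
# compass = Compass()          # default GEONAUTE R500 parameters
# print(compass.mom_z)         # moment of inertia of the rotating assembly, kg.m^2
# print(compass.visc_coef)     # viscous friction coefficient, kg.m^2/s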
class MagneticField:
"""
Representing the magnetic field at a given position on Earth
Default is for Lille in France
Attributes:
name: name of the location
lat: latitude
lon: longitude
int: Earth magnetic field intensity in Tesla
i_deg: Magnetic field inclination in degrees, positive when pointing down
Properties:
i: Magnetic field inclination in radians, positive when pointing down
"""
def __init__(
self,
name="Lille",
lat=50.6333,
lon=3.0667,
intensity=4.8699e-5,
i_deg=65.822,
):
self.name = name
self.lat = lat
self.lon = lon
self.int = intensity
self.i_deg = i_deg
@property
def i(self):
"""
Magnetic field inclination in radians, positive when pointing down
"""
return math.radians(self.i_deg)
class Balance:
"""
Computing the balance tests of the compass
Attributes:
comp: Compass object
mg_fld: MagneticField object
theta_lim: Limit angle of lateral inclination of the compass, degrees
alpha_lim: Limit of tolerance for the angle between the needle and the
north when inclining the compass
Properties:
x_opti: optimal offset of the magnet so the compass is perfectly balanced
for the given magnetic field.
alpha_err: angle of the needle with north when the compass is inclined of
theta_lim
"""
def __init__(self, comp, mg_fld, theta_lim=40, alpha_lim=0):
self.comp = comp
self.mg_fld = mg_fld
self.theta_lim = theta_lim
self.alpha_lim = alpha_lim
@property
def x_opti(self):
"""
The optimal offset of the magnet so the compass is perfectly balanced
for the given magnetic field
"""
return (
-self.comp.mag_rem
* self.comp.V
* self.mg_fld.int
* math.sin(self.mg_fld.i)
/ (MAG_PER * (self.comp.m - self.comp.rho * self.comp.V) * G)
)
@property
def alpha_err(self):
"""
Calculate the angle of the needle with north when the compass is inclined of theta_lim
"""
return math.atan(
(
(self.comp.rho * self.comp.V - self.comp.m)
* G
* self.comp.x
* MAG_PER
/ (
self.comp.mag_rem
* self.mg_fld.int
* math.cos(self.mg_fld.i)
)
- math.tan(self.mg_fld.i)
)
* math.sin(math.radians(self.theta_lim))
)
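# Example sketch (illustration only): balance the default compass in the Lille field.
# balance = Balance(Compass(), MagneticField())
# print(balance.x_opti)                    # magnet offset (m) that exactly balances the needle
# print(math.degrees(balance.alpha_err))   # needle error (degrees) at theta_lim of lateral tilt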
class Dynamic:
"""
Class for rapidity and stability tests
Attributes:
comp: Compass tested
mg_fld: Magnetic field for the test
alpha_init_deg: Initial needle angle in degrees
tf_rap: Final time rapidity test in seconds
tf_stab: Final time stability test in seconds
t_int: Integration time interval in seconds
Y: Oscillation range in meters
f: Oscillation frequency in steps (double steps) per minute
exp_coef_mg: Adjustment coefficient used to fit the simulation to
experimental results
exp_coef_visc: Adjustment coefficient used to fit the simulation to
experimental results
tho_lim: Limit for tho in degrees, default 5°
tho: Time after which the needle angle stays within tho_lim of
north, for the rapidity test
stab_amp: Amplitude of the needle oscillation for the stability test,
calculated for the second half of the time range. Degrees
"""
def __init__(
self,
comp,
mg_fld,
alpha_init_deg=90,
tf_rap=3,
tf_stab=5,
t_int=0.01,
Y=0.093,
f=70,
exp_coef_mg=1,
exp_coef_visc=1,
tho_lim=5,
):
self.comp = comp
self.mg_fld = mg_fld
self.alpha_init_deg = alpha_init_deg
self.tf_rap = tf_rap
self.tf_stab = tf_stab
self.t_int = t_int
self.Y = Y
self.f = f
self.exp_coef_mg = exp_coef_mg
self.exp_coef_visc = exp_coef_visc
self.tho_lim = tho_lim
self.t_rap = np.arange(
0, self.tf_rap, self.t_int
) # Time range for rapidity
self.t_stab = np.arange(
0, self.tf_stab, self.t_int
) # Time range for stability
self.rapidity_results = None
self.stability_results = None
self.stability_results_s = None
self.rapidity_results_s = None
self.rapidity_results_exp = None
self.tho = None
self.stab_amp = None
@property
def alpha_init(self):
return math.radians(self.alpha_init_deg)
@property
def w(self):
return math.pi * self.f / 60
# Parameters for exact simulation
@property
def mg_trm(self):
"""
Magnetic term of the second order equation.
"""
return (
-self.exp_coef_mg
* self.comp.mag_rem
* self.comp.V
* self.mg_fld.int
* math.cos(self.mg_fld.i)
/ (MAG_PER * self.comp.mom_z)
)
@property
def visc_trm(self):
"""
Viscous term of the second order equation.
"""
# The following comment return a viscous term that match with the
# experimental plot shape
# return -6 * math.sqrt(-self.mg_trm) / 5
return -self.exp_coef_visc * self.comp.visc_coef / self.comp.mom_z
@property
def ext_trm(self):
"""
external excitation term of the second order equation.
"""
return (
self.comp.m
* self.comp.x
* self.Y
* math.pow((math.pi * self.f / 30), 2)
/ self.comp.mom_z
)
# Parameters for simplified simulation (small angles hypothesis)
@property
def amplification(self):
"""
Amplification factor if small angles hypothesis (linear equation).
"""
return 1 / (
math.sqrt(
math.pow(
self.mg_trm * self.comp.mom_z
- self.comp.mom_z * math.pow(self.w, 2),
2,
)
+ math.pow(self.comp.visc_coef * self.w, 2)
)
)
@property
def phase(self):
"""
Phase offset if small angles hypothesis (linear equation).
"""
return math.atan(
self.comp.visc_coef * self.w / (self.comp.mom_z * math.pow(self.w, 2))
- self.mg_trm * self.comp.mom_z
)
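# The solvers below integrate the needle equation of motion as a first-order system
# with state x = [alpha_dot, alpha]:
#   rapidity:  alpha'' = mg_trm * sin(alpha) + visc_trm * alpha'
#   stability: alpha'' = mg_trm * sin(alpha) + visc_trm * alpha'
#              + ext_trm * cos(alpha) * sin(pi * f * t / 30)
# The *_simple variants linearise the equations under the small angles hypothesis.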
def rapidity(self):
def F(t, x):
xdot = [[], []]
xdot[0] = self.mg_trm * math.sin(x[1]) + self.visc_trm * x[0]
xdot[1] = x[0]
return xdot
sol = solve_ivp(
fun=F,
t_span=(0, self.tf_rap),
y0=[0, self.alpha_init],
t_eval=self.t_rap,
)
self.rapidity_results = np.degrees(sol.y[1])
self.calculate_tho()
def rapidity_simple(self):
def F(t, x):
xdot = [[], []]
xdot[0] = self.mg_trm * x[1] + self.visc_trm * x[0]
xdot[1] = x[0]
return xdot
sol = solve_ivp(
fun=F,
t_span=(0, self.tf_rap),
y0=[0, self.alpha_init],
t_eval=self.t_rap,
)
self.rapidity_results_s = sol.y[1]
def rapidity_exp(self, file_name):
doc = xlrd.open_workbook(file_name)
sheet_1 = doc.sheet_by_index(0)
rows = sheet_1.nrows
time = []
alpha = []
for r in range(1, rows):
time.append(sheet_1.cell_value(rowx=r, colx=0))
alpha.append(sheet_1.cell_value(rowx=r, colx=1))
if abs(sheet_1.cell_value(rowx=r, colx=1)) > 5:
self.tho = sheet_1.cell_value(rowx=r, colx=0)
self.rapidity_results_exp = (time, alpha)
def stability(self):
def F(t, x):
xdot = [[], []]
xdot[0] = (
self.mg_trm * math.sin(x[1])
+ self.visc_trm * x[0]
+ self.ext_trm
* math.cos(x[1])
* math.sin(math.pi * self.f * t / 30)
)
xdot[1] = x[0]
return xdot
sol = solve_ivp(
fun=F,
t_span=(0, self.tf_stab),
y0=[0, 0],
t_eval=self.t_stab,
)
self.stability_results = np.degrees(sol.y[1])
self.calculate_stab_amp()
def stability_simple(self):
def F(t, x):
xdot = [[], []]
xdot[0] = (
self.mg_trm * x[1]
+ self.visc_trm * x[0]
+ self.ext_trm * math.sin(math.pi * self.f * t / 30)
)
xdot[1] = x[0]
return xdot
sol = solve_ivp(
fun=F, t_span=(0, self.tf_stab), y0=[0, 0], t_eval=self.t_stab
)
self.stability_results_s = sol.y[1]
def display_stab(self):
plt.plot(self.t_stab, self.stability_results)
# stab_sa = [(self.amplification * math.cos(self.w * t + self.phase)) for t in self.t_stab]
| |
<filename>search/COSP/network.py
import torch
import torch.nn as nn
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
from cosp_nas.supernet.blocks import Shufflenet, Shuffle_Xception
import math
from typing import NamedTuple
import numpy as np
import sys
sys.path.append("..")
from cosp_nas.utils import CachedLookup, compute_in_batches, get_inner_model
from cosp_nas.evaluator import get_costs
from .problem import Subnet, make_state
def set_decode_type(model, decode_type):
if isinstance(model, DataParallel) or isinstance(model, DistributedDataParallel):
model = model.module
model.set_decode_type(decode_type)
class AttentionModelFixed(NamedTuple):
"""
Context for AttentionModel decoder that is fixed during decoding so can be precomputed/cached
This class allows for efficient indexing of multiple Tensors at once
"""
node_embeddings: torch.Tensor
context_node_projected: torch.Tensor
glimpse_key: torch.Tensor
glimpse_val: torch.Tensor
logit_key: torch.Tensor
def __getitem__(self, key):
if torch.is_tensor(key) or isinstance(key, slice):
return AttentionModelFixed(
node_embeddings=self.node_embeddings[key],
context_node_projected=self.context_node_projected[key],
glimpse_key=self.glimpse_key[:, key], # dim 0 are the heads
glimpse_val=self.glimpse_val[:, key], # dim 0 are the heads
logit_key=self.logit_key[key]
)
# Fall back to normal NamedTuple indexing for plain integer keys
return super(AttentionModelFixed, self).__getitem__(key)
class AttentionModel(nn.Module):
def __init__(self,
embedding_dim,
hidden_dim,
feed_forward_hidden,
supernet,
n_encode_layers=2,
tanh_clipping=10.,
mask_inner=True,
mask_logits=True,
normalization='batch',
n_heads=8
):
super(AttentionModel, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.n_encode_layers = n_encode_layers
self.decode_type = None
self.temp = 1.0
self.tanh_clipping = tanh_clipping
self.mask_inner = mask_inner
self.mask_logits = mask_logits
self.supernet = supernet
self.n_heads = n_heads
# Problem specific context parameters (placeholder and step context dimension)
step_context_dim = embedding_dim # Embedding of last node
node_dim = 2 # layer, operator
# Learned input symbols for first action
self.W_placeholder = nn.Parameter(torch.Tensor(step_context_dim))
self.W_placeholder.data.uniform_(-1, 1) # Placeholder should be in range of activations
self.init_embed = nn.Linear(node_dim, embedding_dim)
self.encoder = GraphAttentionEncoder(
n_heads=n_heads,
embed_dim=embedding_dim,
n_layers=self.n_encode_layers,
normalization=normalization,
feed_forward_hidden = feed_forward_hidden
)
# For each node we compute (glimpse key, glimpse value, logit key) so 3 * embedding_dim
self.project_node_embeddings = nn.Linear(embedding_dim, 3 * embedding_dim, bias=False)
self.project_fixed_context = nn.Linear(embedding_dim, embedding_dim, bias=False)
self.project_step_context = nn.Linear(step_context_dim, embedding_dim, bias=False)
assert embedding_dim % n_heads == 0
# Note n_heads * val_dim == embedding_dim so input to project_out is embedding_dim
self.project_out = nn.Linear(embedding_dim, embedding_dim, bias=False)
def set_decode_type(self, decode_type, temp=None):
self.decode_type = decode_type
if temp is not None: # Do not change temperature if not provided
self.temp = temp
def forward(self, input,return_pi=False):
"""
:param input: (batch_size, graph_size, node_dim) input node features or dictionary with multiple tensors
:param return_pi: whether to return the output sequences, this is optional as it is not compatible with
using DataParallel as the results may be of different lengths on different GPUs
:return:
"""
# print("one ! ")
# print(input.shape)
# print(input.device)
# print("network")
# from IPython import embed
# embed()
embeddings, _ = self.encoder(self.init_embed(input))
_log_p, pi = self._decode(input, embeddings)
# print(args.device," : ",pi)
# while True:
# pass
# # BREAKPOINT
# return
# print("_log_p shape : ",_log_p.shape)
# print("pi shape : ",pi.shape)
cost_1, cost_5, mask = get_costs(self.supernet, input, pi)
# print("cost's device : ",cost.device)
# return cost
# print("cost's shape is = ",cost_1.shape)
# Log likelyhood is calculated within the model since returning it per action does not work well with
# DataParallel since sequences can be of different lengths
ll = self._calc_log_likelihood(_log_p, pi, mask)
if return_pi:
return cost_1, cost_5, ll, pi
return cost_1, cost_5, ll
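# _calc_log_likelihood gathers, for every decoding step, the log-probability of the
# action that was actually selected (pi) and sums over the steps, i.e. log p(pi | input).
# Masked steps are zeroed out so that irrelevant actions are not reinforced.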
def _calc_log_likelihood(self, _log_p, a, mask):
# Get log_p corresponding to selected actions
log_p = _log_p.gather(2, a.unsqueeze(-1)).squeeze(-1)
# Optional: mask out actions irrelevant to objective so they do not get reinforced
if mask is not None:
log_p[mask] = 0
assert (log_p > -1000).data.all(), "Logprobs should not be -inf, check sampling procedure!"
# Calculate log_likelihood
return log_p.sum(1)
def _decode(self, input, embeddings):
outputs = []
sequences = []
# print("inner!")
state = make_state(get_inner_model(self.supernet),input)
# print("state = ",state)
# Compute keys, values for the glimpse and keys for the logits once as they can be reused in every step
fixed = self._precompute(embeddings)
batch_size = state.batch_size
# Perform decoding steps
for i in range(get_inner_model(self.supernet).n_layer):
log_p, mask = self._get_log_p(fixed, state)
# Select the indices of the next nodes in the sequences, result (batch_size) long
selected = self._select_node(log_p.exp()[:, 0, :], mask[:, 0, :]) # Squeeze out steps dimension
state.update(selected)
outputs.append(log_p[:, 0, :])
sequences.append(selected)
# Collected lists, return Tensor
return torch.stack(outputs, 1), torch.stack(sequences, 1)
def _select_node(self, probs, mask):
assert (probs == probs).all(), "Probs should not contain any nans"
if self.decode_type == "greedy":
_, selected = probs.max(1)
assert not mask.gather(1, selected.unsqueeze(
-1)).data.any(), "Decode greedy: infeasible action has maximum probability"
elif self.decode_type == "sampling":
selected = probs.multinomial(1).squeeze(1)
# Check if sampling went OK, can go wrong due to bug on GPU
# See https://discuss.pytorch.org/t/bad-behavior-of-multinomial-function/10232
while mask.gather(1, selected.unsqueeze(-1)).data.any():
print('Sampled bad values, resampling!')
selected = probs.multinomial(1).squeeze(1)
else:
assert False, "Unknown decode type"
return selected
def _precompute(self, embeddings, num_steps=1):
# The fixed context projection of the graph embedding is calculated only once for efficiency
graph_embed = embeddings.mean(1)
# fixed context = (batch_size, 1, embed_dim) to make broadcastable with parallel timesteps
fixed_context = self.project_fixed_context(graph_embed)[:, None, :]
# The projection of the node embeddings for the attention is calculated once up front
glimpse_key_fixed, glimpse_val_fixed, logit_key_fixed = \
self.project_node_embeddings(embeddings[:, None, :, :]).chunk(3, dim=-1)
# No need to rearrange key for logit as there is a single head
fixed_attention_node_data = (
self._make_heads(glimpse_key_fixed, num_steps),
self._make_heads(glimpse_val_fixed, num_steps),
logit_key_fixed.contiguous()
)
return AttentionModelFixed(embeddings, fixed_context, *fixed_attention_node_data)
def _get_log_p(self, fixed, state, normalize=True):
# Compute query = context node embedding
query = fixed.context_node_projected + \
self.project_step_context(self._get_parallel_step_context(fixed.node_embeddings, state))
# Compute keys and values for the nodes
glimpse_K, glimpse_V, logit_K = self._get_attention_node_data(fixed, state)
# Compute the mask
mask = state.get_mask()
# Compute logits (unnormalized log_p)
log_p, glimpse = self._one_to_many_logits(query, glimpse_K, glimpse_V, logit_K, mask)
if normalize:
log_p = torch.log_softmax(log_p / self.temp, dim=-1)
assert not torch.isnan(log_p).any()
return log_p, mask
def _get_parallel_step_context(self, embeddings, state, from_depot=False):
"""
Returns the context per step, optionally for multiple steps at once (for efficient evaluation of the model)
:param embeddings: (batch_size, graph_size, embed_dim)
:param prev_a: (batch_size, num_steps)
:param first_a: Only used when num_steps = 1, action of first step or None if first step
:return: (batch_size, num_steps, context_dim)
"""
# print("state = ",state)
current_node = state.get_current_node()
batch_size, num_steps = current_node.size()
assert num_steps == 1 , "num_steps must be 1 ! !"
if state.now == 0:
return self.W_placeholder[None, None, :].expand(batch_size, 1, self.W_placeholder.size(-1))
else:
return embeddings.gather(
1,
current_node[:, :, None].expand(batch_size, 1, embeddings.size(-1))
)
def _one_to_many_logits(self, query, glimpse_K, glimpse_V, logit_K, mask):
batch_size, num_steps, embed_dim = query.size()
key_size = val_size = embed_dim // self.n_heads
# Compute the glimpse, rearrange dimensions so the dimensions are (n_heads, batch_size, num_steps, 1, key_size)
glimpse_Q = query.view(batch_size, num_steps, self.n_heads, 1, key_size).permute(2, 0, 1, 3, 4)
# Batch matrix multiplication to compute compatibilities (n_heads, batch_size, num_steps, graph_size)
compatibility = torch.matmul(glimpse_Q, glimpse_K.transpose(-2, -1)) / math.sqrt(glimpse_Q.size(-1))
if self.mask_inner:
assert self.mask_logits, "Cannot mask inner without masking logits"
compatibility[mask[None, :, :, None, :].expand_as(compatibility)] = -math.inf
# Batch matrix multiplication to compute heads (n_heads, batch_size, num_steps, val_size)
heads = torch.matmul(torch.softmax(compatibility, dim=-1), glimpse_V)
# Project to get glimpse/updated context node embedding (batch_size, num_steps, embedding_dim)
glimpse = self.project_out(
heads.permute(1, 2, 3, 0, 4).contiguous().view(-1, num_steps, 1, self.n_heads * val_size))
# Now projecting the glimpse is not needed since this can be absorbed into project_out
# final_Q = self.project_glimpse(glimpse)
final_Q = glimpse
# Batch matrix multiplication to compute logits (batch_size, num_steps, graph_size)
# logits = 'compatibility'
logits = torch.matmul(final_Q, logit_K.transpose(-2, -1)).squeeze(-2) / math.sqrt(final_Q.size(-1))
# From the logits compute the probabilities by clipping, masking and softmax
if self.tanh_clipping > 0:
logits = torch.tanh(logits) * self.tanh_clipping
if self.mask_logits:
logits[mask] = -math.inf
return logits, glimpse.squeeze(-2)
def _get_attention_node_data(self, fixed, state):
return fixed.glimpse_key, fixed.glimpse_val, fixed.logit_key
def _make_heads(self, v, num_steps=None):
assert num_steps is None or v.size(1) == 1 or v.size(1) == num_steps
return (
v.contiguous().view(v.size(0), v.size(1), v.size(2), self.n_heads, -1)
.expand(v.size(0), v.size(1) if num_steps is None else num_steps, v.size(2), self.n_heads, -1)
.permute(3, 0, 1, 2, 4) # (n_heads, batch_size, num_steps, graph_size, head_dim)
)
class SkipConnection(nn.Module):
def __init__(self, module):
super(SkipConnection, self).__init__()
self.module = module
def forward(self, input):
return input + self.module(input)
class MultiHeadAttention(nn.Module):
def __init__(
self,
n_heads,
input_dim,
embed_dim=None,
val_dim=None,
key_dim=None
):
super(MultiHeadAttention, self).__init__()
if val_dim is None:
assert embed_dim is not None, "Provide either embed_dim or val_dim"
val_dim = embed_dim // n_heads
if key_dim is None:
key_dim = val_dim
self.n_heads = n_heads
self.input_dim = input_dim
self.embed_dim = embed_dim
self.val_dim = val_dim
self.key_dim = key_dim
self.norm_factor = 1 / math.sqrt(key_dim) # | |
<filename>genens/gp/types.py
# -*- coding: utf-8 -*-
"""
This module defines the structure of GP primitives.
The GP primitives are nodes with typed edges (parent input and child output types must match) and variable
arity (for a given type, its final arity can be chosen during the evolution process).
A ``GpPrimitive`` is a node whose types, arities and keyword arguments have been decided. To create such primitives,
it is possible to take use of templates. These contain possible values of arities, types and keyword arguments and
methods for choosing final values.
The primitive templates defined in this file are
1) functions - inner nodes of the tree, transform input into output
2) terminals - leaves of the tree, provide constant output.
"""
import functools
import random
from copy import deepcopy
from deap import base
from typing import Callable, List, Any, Dict, Union, Tuple
class GpTreeIndividual:
"""Represents a tree individual used in GP.
The individual is a tree encoded as a list of ``GpPrimitive`` nodes.
The list is a post-order representation of the tree. The tree can be
uniquely reconstructed using the arity (and types) of primitives.
"""
def __init__(self, prim_list: List['GpPrimitive'], max_height: int):
"""
Construct a GP tree from a list of primitives.
Args:
prim_list: Post-order list of GpPrimitive nodes representing the tree.
max_height: Height of the tree - maximum of all node depths + 1
"""
self.primitives = prim_list
self.max_height = max_height
self.validate_tree()
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
new = object.__new__(type(self))
memo[id(self)] = new
new.__dict__.update(deepcopy(self.__dict__, memo))
return new
def __eq__(self, other):
if not isinstance(other, GpTreeIndividual):
return False
if self.primitives != other.primitives:
return False
return True
def __repr__(self):
return f'GpTreeIndividual(height={self.max_height} primitives={self.primitives.__repr__()})'
def run_tree(self, node_func, group_children: bool = False) -> Any:
"""
Applies a function with the signature ``func(node, child_list)`` on all
nodes in the tree. The tree is traversed in post-order.
The arguments of the function are a node and list of result values of its child nodes.
:param node_func: Function which is applied on all nodes of the tree.
:param group_children: If True, child results are grouped per input type as
(type_name, children) tuples before being passed to ``node_func``.
:return: Return value of the root.
"""
stack = []
for node in self.primitives:
if node.arity == 0:
stack.append(node_func(node, []))
else:
args = stack[-node.arity:]
stack = stack[:-node.arity]
if group_children:
children_by_type = []
for t in node.in_type:
t_children, args = args[-t.arity:], args[:-t.arity]
children_by_type.append((t.name, t_children))
args = children_by_type
stack.append(node_func(node, args))
if len(stack) > 1:
raise ValueError("Bad tree - invalid primitive list.")
return stack.pop()
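# Example sketch (illustration only): count the nodes of a tree via run_tree.
# n_nodes = tree.run_tree(lambda node, children: 1 + sum(children))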
def subtree(self, root_ind: int) -> Tuple[int, int]:
"""
Returns the start position of the subtree with primitive `self.primitives[root_ind]` as root. Note
that the returned value is in fact the index of one of the leaves, as the node list is post-order.
Thus, the whole subtree can be extracted with ``self.primitives[start : root_ind + 1]``,
where ``start`` is the first element of the returned tuple.
Args:
root_ind: Position of the root (index of the beginning).
Returns: A tuple `(s, h)`, where `s` is the start index and `h` is the height of the subtree.
"""
curr = self.primitives[root_ind]
arity_rem = curr.arity
init_h = curr.depth
max_h = init_h
while arity_rem > 0:
root_ind = root_ind - 1
curr = self.primitives[root_ind]
max_h = max(max_h, curr.depth)
arity_rem = arity_rem - 1 + curr.arity
return root_ind, (max_h - init_h + 1)
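# Example sketch (hypothetical `tree` instance): extracting the whole subtree
# rooted at `root_ind` using the start index returned above.
#
#   start, height = tree.subtree(root_ind)
#   subtree_prims = tree.primitives[start:root_ind + 1]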
def validate_tree(self):
"""
Validates the tree; raises an exception if any node has an inconsistent number, type or depth of children.
"""
def validate_node(node, child_list):
if node.arity != len(child_list):
raise ValueError("Invalid number of children.")
child_id = 0
for in_type in node.in_type:
for i in range(in_type.arity):
child = child_list[child_id + i]
if child.out_type != in_type.name:
raise ValueError("Invalid child type.")
if child.depth != node.depth + 1:
raise ValueError("Invalid child height.")
child_id += in_type.arity
return node
self.run_tree(validate_node)
if self.max_height != max(prim.depth + 1 for prim in self.primitives):
raise ValueError("Invalid tree height.")
class DeapTreeIndividual(GpTreeIndividual):
"""
Represents an individual with DEAP-compatible fitness.
"""
def __init__(self, prim_list: List['GpPrimitive'], max_height: int):
super().__init__(prim_list, max_height)
self.fitness = DeapTreeIndividual.Fitness() # (score, log(evaluation_time))
self.compiled_pipe = None
class Fitness(base.Fitness):
def __init__(self, values=()):
self.weights = (1.0, -1.0)
super().__init__(values)
def reset(self):
del self.fitness.values
self.compiled_pipe = None
class GpPrimitive:
"""
Represents a typed node in the GP tree.
Its name and keyword dictionary hold information about the function
or object, which is associated with the node.
"""
def __init__(self,
name: str,
obj_kwargs: Dict[str, Any],
in_type: List['GpPrimitive.InputType'],
out_type: str,
arity: int,
depth: int):
"""
Creates an instance of a GP tree node. The number, output types and ordering of its children
are specified by `in_type`.
Args:
name: Name of the node.
obj_kwargs: Keyword arguments associated with the node.
in_type:
List of input types with arity. The subtypes are ordered - e.g. [('data', 2), ('ens', 1)] is not the
same as [('ens', 1), ('data', 2)].
out_type: Name of the output type.
arity: Sum of the arities of all input subtypes.
depth: Depth of the node.
"""
self.name = name
self.obj_kwargs = obj_kwargs
self.in_type = in_type
self.out_type = out_type
self.arity = arity
self.depth = depth
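# Construction sketch (name and values are illustrative only, not taken from
# the project's configuration): a node that takes two 'data' children followed
# by one 'ens' child and produces an 'ens' output at depth 1.
#
#   prim = GpPrimitive(
#       name='stacking',
#       obj_kwargs={},
#       in_type=[GpPrimitive.InputType('data', 2), GpPrimitive.InputType('ens', 1)],
#       out_type='ens',
#       arity=3,
#       depth=1,
#   )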
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
new = object.__new__(type(self))
memo[id(self)] = new
new.__dict__.update(deepcopy(self.__dict__, memo))
return new
def __eq__(self, other):
if not isinstance(other, GpPrimitive):
return False
if self.name != other.name:
return False
if self.arity != other.arity or self.in_type != other.in_type or self.out_type != other.out_type:
return False
if self.obj_kwargs != other.obj_kwargs:
return False
return True
def __repr__(self):
return 'GpPrimitive(name=' + self.name + ", arity={}".format(self.arity)\
+ ", height={})".format(self.depth)
class InputType:
"""
Represents the input type of a primitive. It determines how many children with a specific
output type the node should have.
"""
def __init__(self, name: str, arity: int):
"""
Construct a new instance of input type with arity.
:param name: Name of the type.
:param arity: Arity of this type.
"""
self.name = name
self.arity = arity
def __eq__(self, other):
if not isinstance(other, GpPrimitive.InputType):
return False
if self.name != other.name or self.arity != other.arity:
return False
return True
class GpTerminalTemplate:
"""
Represents a terminal of the GP tree, or a primitive with no inputs.
The output type is fixed.
The keyword arguments are chosen from lists of possible values.
"""
def __init__(self, name: str, out_type: str, group: str = None):
"""
Creates a new instance of a terminal template.
Args:
name: Name of the node.
out_type: Name of the output type.
"""
self.name = name
self.type_arity_template = []
self.out_type = out_type
self.group = group
def __repr__(self):
return f"GpTerminalTemplate: {self.name} - {self.group}"
def create_primitive(self, curr_height: int, max_arity: int, kwargs_dict: Dict[str, List[Any]]) -> GpPrimitive:
"""
Creates an instance of a `GpPrimitive` from the template.
Selects keyword arguments from `kwargs_dict`. For every key,
the dict contains a list of possible values.
Args:
curr_height: Height at which the node is generated.
max_arity: Only for compatibility, not used for terminals.
kwargs_dict: Dictionary which contains possible keyword argument values.
Return: A new instance of `GpPrimitive`.
"""
prim_kwargs = _choose_kwargs(kwargs_dict)
return GpPrimitive(self.name, prim_kwargs, [], self.out_type, 0, curr_height)
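# Usage sketch (the template name and kwargs values are illustrative, not the
# project's actual configuration):
#
#   template = GpTerminalTemplate('dummy_terminal', out_type='out')
#   prim = template.create_primitive(
#       curr_height=2, max_arity=0,
#       kwargs_dict={'n_estimators': [10, 50, 100]},
#   )
#   # prim.arity == 0, prim.out_type == 'out', prim.depth == 2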
class TypeArity:
"""
Represents a variable node arity associated with a type.
"""
def __init__(self,
prim_type: str,
arity_template: Union[int, Tuple[int, int], Tuple[int, str]]):
"""
Constructs a new instance of a type template - with a fixed type, but possibly variable arity.
Args:
prim_type: Name of the type.
arity_template: Either a fixed arity value, or a bounded interval (a, b) where a and b are integers,
or an interval (a, 'n') that has only a lower bound a ('n' is a string).
"""
self.prim_type = prim_type
self.arity_template = arity_template
# check arity range
if isinstance(self.arity_template, tuple):
lower_invalid = self.arity_template[0] < 0
upper_invalid = self.arity_template[1] != 'n' and self.arity_template[0] > self.arity_template[1]
if lower_invalid or upper_invalid:
raise ValueError("Invalid arity range.")
# check fixed arity
elif isinstance(self.arity_template, int):
if self.arity_template <= 0:
raise ValueError("Arity must be greater than 0.")
else:
raise ValueError("Invalid arity type.")
def is_valid_arity(self, arity: int):
"""
Determines whether `self.choose_arity` could possibly result in `arity`.
Args:
arity: Input arity to compare with this template.
Returns: True if `arity` can be created from this template.
"""
if isinstance(self.arity_template, tuple):
# out of range
if arity < self.arity_template[0]:
return False
if self.arity_template[1] != 'n' and arity > self.arity_template[1]:
return False
# inside range
return True
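# Example sketch: a bounded 'data' arity of 2-4 children and an open-ended
# variant with at least one child.
#
#   bounded = TypeArity('data', (2, 4))
#   open_ended = TypeArity('data', (1, 'n'))
#   assert bounded.is_valid_arity(3) and not bounded.is_valid_arity(5)
#   assert open_ended.is_valid_arity(100)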
cllen = math.hypot(cl[0][0] - cl[1][0], cl[0][1] - cl[1][1])
if cllen > maxlen:
maxlen = cllen
self.bestcl = cl
# print("cl maxlen %d" % mindif)
self.bestll = None
if curlline is not None:
mindif = 10000
maxlen = 0
for ll in curlline:
lldeg = self.orientation(ll)
if self.lline[2] is not None:
lastlldeg = self.orientation(self.lline[2])
if abs(lldeg - lastlldeg) < mindif:
self.bestll = ll
mindif = abs(lldeg - lastlldeg)
# print("ll mindif %d" % mindif)
else:
lllen = math.hypot(ll[0][0] - ll[1][0], ll[0][1] - ll[1][1])
if lllen > maxlen:
self.bestll = ll
maxlen = lllen
# print("ll maxlen %d" % mindif)
self.bestrl = None
if currline is not None:
mindif = 10000
maxlen = 0
for rl in currline:
rldeg = self.orientation(rl)
if self.rline[2] is not None:
lastrldeg = self.orientation(self.rline[2])
if abs(rldeg - lastrldeg) < mindif:
self.bestrl = rl
mindif = abs(rldeg - lastrldeg)
# print("rl mindif %d" % mindif)
else:
rllen = math.hypot(rl[0][0] - rl[1][0], rl[0][1] - rl[1][1])
if rllen > maxlen:
self.bestrl = rl
maxlen = rllen
# print("rl maxlen %d" % mindif)
self.curconf = 0
if self.bestrl is not None and self.bestcl is not None and self.bestrl[0][0] < self.bestcl[0][0] and self.bestrl[1][0] < self.bestcl[1][0]:
tmp = self.bestcl
self.bestcl = self.bestrl
self.bestrl = tmp
if self.bestll is not None and self.bestcl is not None and self.bestcl[0][0] < self.bestll[0][0] and self.bestcl[1][0] < self.bestll[1][0]:
tmp = self.bestll
self.bestll = self.bestcl
self.bestcl = tmp
if self.bestrl is not None and self.bestll is not None and self.bestrl[0][0] < self.bestll[0][0] and self.bestrl[1][0] < self.bestll[1][0]:
tmp = self.bestll
self.bestll = self.bestrl
self.bestrl = tmp
if self.bestrl is not None and self.bestll is not None and self.bestrl[0][0] == self.bestll[0][0] and self.bestrl[1][0] == self.bestll[1][0]:
# self.bestrl and self.bestll are the same
if midpix > self.bestll[0][0]:
self.bestrl = None
else:
self.bestll = None
if ((self.bestrl is not None and self.bestll is not None and self.bestcl is None and abs(self.bestrl[0][0] - self.bestll[0][0]) < self.laneWidth / 2) and abs(self.bestrl[0][0] - self.bestll[0][0]) > self.laneWidth / 6):
# bestrl is too close to bestll. One is center
for i in (2,1,0):
foundR = False
foundL = False
foundC = False
if self.bestll is not None and self.cline[i] is not None and self.check(self.bestll, self.cline[i]):
self.bestcl = self.bestll
self.bestll = None
foundC = True
break
if self.bestrl is not None and self.cline[i] is not None and self.check(self.bestrl, self.cline[i]):
self.bestcl = self.bestrl
self.bestrl = None
foundC = True
break
if self.bestll is not None and self.lline[i] is not None and self.check(self.bestll, self.lline[i]):
foundL = True
if self.bestrl is not None and self.rline[i] is not None and self.check(self.bestrl, self.rline[i]):
foundR = True
if foundC:
pass
elif foundL and not foundR:
self.bestcl = self.bestrl
self.bestrl = None
elif not foundL and foundR:
self.bestcl = self.bestll
self.bestll = None
if ((self.bestrl is not None and self.bestll is not None and abs(self.bestrl[0][0] - self.bestll[0][0]) < self.laneWidth / 6) or
(self.bestcl is not None and self.bestll is not None and abs(self.bestcl[0][0] - self.bestll[0][0]) < self.laneWidth / 6) or
(self.bestcl is not None and self.bestrl is not None and abs(self.bestcl[0][0] - self.bestrl[0][0]) < self.laneWidth / 6)):
# best lines are too close
foundR = False
foundL = False
foundC = False
for i in (2,1,0):
if self.bestrl is not None and self.rline[i] is not None and self.check(self.bestrl, self.rline[i]):
foundR = True
if self.bestll is not None and self.lline[i] is not None and self.check(self.bestll, self.lline[i]):
foundL = True
if self.bestcl is not None and self.cline[i] is not None and self.check(self.bestcl, self.cline[i]):
foundC = True
if (self.bestrl is not None and self.bestll is not None and abs(self.bestrl[0][0] - self.bestll[0][0]) < self.laneWidth / 6):
if (foundR and foundL) or (not foundR and not foundL):
if midpix > self.bestll[0][0]:
self.bestrl = None
else:
self.bestll = None
elif foundR and not foundL:
self.bestll = None
elif not foundR and foundL:
self.bestrl = None
if (self.bestcl is not None and self.bestll is not None and abs(self.bestcl[0][0] - self.bestll[0][0]) < self.laneWidth / 6):
if (foundC and foundL) or (not foundC and not foundL):
if abs(midpix - self.bestcl[0][0]) < self.laneWidth / 6:
self.bestll = None
elif (midpix - self.bestll[0][0]) > self.laneWidth / 6:
self.bestcl = None
else:
self.bestll = None
elif foundC and not foundL:
self.bestll = None
elif not foundC and foundL:
self.bestcl = None
if (self.bestcl is not None and self.bestrl is not None and abs(self.bestcl[0][0] - self.bestrl[0][0]) < self.laneWidth / 6):
if (foundC and foundR) or (not foundC and not foundR):
if abs(midpix - self.bestcl[0][0]) < self.laneWidth / 6:
self.bestrl = None
elif (self.bestrl[0][0] - midpix) > self.laneWidth / 6:
self.bestcl = None
else:
self.bestrl = None
elif foundC and not foundR:
self.bestrl = None
elif not foundC and foundR:
self.bestcl = None
'''
# following has been replaced by above
if self.bestrl is not None and self.bestll is not None and self.bestrl[0][0] == self.bestll[0][0] and self.bestrl[0][1] == self.bestll[0][1] and self.bestrl[1][0] == self.bestll[1][0] and self.bestrl[1][1] == self.bestll[1][1]:
if self.rline[i] is not None and self.check(self.bestrl, self.rline[i]):
self.bestll = None
break
elif self.lline[i] is not None and self.check(self.bestll, self.lline[i]):
self.bestrl = None
break
elif self.lline[i] is None and self.bestll is None and self.bestrl is not None:
self.bestll = None
break
elif self.rline[i] is None and self.bestrl is None and self.bestll is not None:
self.bestrl = None
break
else:
if self.rline[i] is not None and self.bestll is not None:
rl = self.rline[i]
if self.bestll[0][0] < rl[0][0] and self.bestll[1][0] < rl[1][0]:
self.bestrl = None
break
if self.lline[i] is not None and self.bestll is not None:
ll = self.lline[i]
if self.bestrl[0][0] > ll[0][0] and self.bestrl[1][0] > ll[1][0]:
self.bestll = None
break
'''
########################
if (((self.bestrl is None and self.bestvprl is not None) or
(self.bestrl is not None and np.array_equal(self.bestrl,self.bestvprl))) and
((self.bestcl is None and self.bestvpcl is not None) or
(self.bestcl is not None and np.array_equal(self.bestcl,self.bestvpcl))) and
((self.bestll is None and self.bestvpll is not None) or
(self.bestll is not None and np.array_equal(self.bestll,self.bestvpll)))):
self.bestrl = self.bestvprl
self.bestcl = self.bestvpcl
self.bestll = self.bestvpll
########################
# Done, set globals and return vals
########################
tmppos = self.setCurPos(self.bestll, self.bestcl, self.bestrl, self.pos[2])
if tmppos >= 0:
self.curpos = tmppos
del(self.lline[0])
self.lline.append(self.bestll)
del(self.cline[0])
self.cline.append(self.bestcl)
del(self.rline[0])
self.rline.append(self.bestrl)
del(self.pos[0])
self.pos.append(self.curpos)
self.conf = self.curconf
# print("self.lline")
# print(self.lline)
# print(self.bestll)
# print("self.cline")
# print(self.cline)
# print(self.bestcl)
# print(self.cline[2])
# print("self.rline")
# print(self.cline)
# print(self.bestrl)
# print(self.rline[2])
self.conf, self.steering, self.throttle = self.setSteerThrottle(self.curpos, self.lline[2], self.cline[2], self.rline[2], self.conf)
print ("steer %f throt %f conf %d pos(%s)" % (self.steering, self.throttle, self.conf, self.strpos(self.pos[2])))
#######################
# print to screen
#######################
croi = copy.deepcopy(roi)
if self.lline is not None or self.rline is not None or self.cline is not None:
lrclines = []
str1 = "final: "
if self.lline[2] is not None:
lrclines.append(self.lline[2])
str1 += "ll "
if self.rline[2] is not None:
lrclines.append(self.rline[2])
str1 += "rl "
if self.cline[2] is not None:
lrclines.append(self.cline[2])
str1 += "cl "
print(str1)
for line in lrclines:
if line is not None:
x1 = line[0][0]
y1 = line[0][1]
x2 = line[1][0]
y2 = line[1][1]
cv2.line(croi,(x1,y1),(x2,y2),(0,255,0),2)
'''
cv2.imshow(str(self.seq),croi)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
if cfg.SAVE_MOVIE:
out = self.image_path(cfg.MOVIE_LOC, self.seq)
cv2.imwrite(out, croi)
print("wrote %s" % (out))
# ffmpeg -framerate 4 -i /tmp/movie4/1%03d_cam-image_array_.jpg -c:v libx264 -profile:v high -crf 20 -pix_fmt yuv420p output.mp4
self.TB.checkThrottle(self.throttle)
self.saveDonkeyState()
return self.steering, self.throttle
def binary_hsv_mask(self, img, color_range):
lower = np.array(color_range[0])
upper = np.array(color_range[1])
return cv2.inRange(img, lower, upper)
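# Usage sketch for the HSV masking above (the bounds are illustrative, not
# this project's tuned values):
#
#   yellow_range = ((20, 100, 100), (30, 255, 255))   # HSV lower/upper bounds
#   mask = self.binary_hsv_mask(hsv_img, yellow_range)
#   masked = cv2.bitwise_and(hsv_img, hsv_img, mask=mask)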
def process_img_color(self, img):
if self.TB.throttleCheckInProgress():
self.TB.setMinMaxThrottle(img)
simplecl = None
ymergedlines = None
wmergedlines = None
smergedlines = None
roi = self.getROI(img)
roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
cmask = self.binary_hsv_mask(roi, self.line_color_simple)
cimg = cv2.bitwise_and(roi, roi, mask = cmask)
# edges = cv2.Canny(roi, 100, 200)
hb = HoughBundler(self.width, self.height, self.bestll, self.bestcl, self.bestrl)
print("process_img_color ", self.scannymin, self.scannymax)
for i in range(self.scannymin, self.scannymax):
edges = cv2.Canny(cimg, i*10, i*20)
lines = cv2.HoughLinesP(edges, 1, np.pi/90, 13, 20, 20, 20)
# simple line follower
simplecl = hb.line_follower(lines)
print("simplecl: ", simplecl)
self.line_color_simple_count, self.line_color_simple_var, self.line_color_simple_mean = self.createLineIterator(simplecl, roi, self.line_color_simple_count, self.line_color_simple_var, self.line_color_simple_mean)
if simplecl is not None:
self.scannylast = i
<reponame>Google-Autofuzz/clusterfuzz
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
from builtins import map
from builtins import range
from builtins import str
from future import utils as future_utils
from past.builtins import basestring
from future import standard_library
standard_library.install_aliases()
import ast
import datetime
import functools
import gc
import hashlib
import inspect
import os
import random
import requests
import sys
import time
import urllib.parse
import urllib.request
import weakref
from base import errors
from base import memoize
from base import retry
from config import local_config
from metrics import logs
from system import environment
try:
import psutil
except ImportError:
psutil = None
# FIXME: Binary extensions list is still very basic.
BINARY_EXTENSIONS = [
# Media formats.
'.mp3',
'.ogg',
'.mp4',
'.webm',
# Image Formats.
'.png',
'.jpg',
'.gif',
# Misc.
'.pdf',
'.swf',
]
FUZZ_PREFIX = 'fuzz-'
TEXT_EXTENSIONS = [
'.css', '.js', '.htm', '.html', '.svg', '.xhtml', '.xht', '.xml', '.xsl'
]
URL_REQUEST_RETRIES = 5
URL_REQUEST_FAIL_WAIT = 1
WINDOWS_PREFIX_PATH = '\\\\?\\'
# Thread pool for use in function timeouts.
THREAD_POOL = None
LOCAL_SOURCE_MANIFEST = os.path.join('src', 'appengine', 'resources',
'clusterfuzz-source.manifest')
def utcnow():
"""Return datetime.datetime.utcnow(). We need this method because we can't
mock built-in methods."""
return datetime.datetime.utcnow() # pragma: no cover.
def current_date_time():
"""Returns current date and time."""
return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')
def utc_date_to_timestamp(date):
"""Converts a (UTC) datetime.date to a UNIX timestamp."""
return (date - datetime.date(1970, 1, 1)).total_seconds()
def utc_datetime_to_timestamp(dt):
"""Converts a (UTC) datetime.date to a UNIX timestamp."""
return (dt - datetime.datetime.utcfromtimestamp(0)).total_seconds()
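# Examples (both helpers count seconds since the UNIX epoch):
#
#   utc_date_to_timestamp(datetime.date(1970, 1, 2))            -> 86400.0
#   utc_datetime_to_timestamp(datetime.datetime(1970, 1, 1, 1)) -> 3600.0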
# TODO(mbarbella): Clean up call-sites and delete this function. Any usage is
# potentially indicative of poor tracking of encodings.
def decode_to_unicode(obj, encoding='utf-8'):
"""Decode object to unicode encoding."""
if isinstance(obj, basestring) and not isinstance(obj, str):
try:
obj = str(obj, encoding)
except:
obj = str(''.join(char for char in obj if ord(char) < 128), encoding)
return obj
@retry.wrap(
retries=URL_REQUEST_RETRIES,
delay=URL_REQUEST_FAIL_WAIT,
function='base.utils.fetch_url')
def fetch_url(url):
"""Fetch url content."""
operations_timeout = environment.get_value('URL_BLOCKING_OPERATIONS_TIMEOUT')
response = requests.get(url, timeout=operations_timeout)
if response.status_code == 404:
return None
response.raise_for_status()
return response.text
def fields_match(string_1,
string_2,
field_separator=':',
allow_empty_fields=True):
"""Match fields of two strings, separated by a |field_separator|. Empty fields
can be ignored via |allow_empty_fields| flag."""
if string_1 is None or string_2 is None:
return False
if string_1 == string_2:
return True
string_1_fields = string_1.split(field_separator)
string_2_fields = string_2.split(field_separator)
if not allow_empty_fields and len(string_1_fields) != len(string_2_fields):
return False
min_fields_length = min(len(string_1_fields), len(string_2_fields))
for i in range(min_fields_length):
if string_1_fields[i] != string_2_fields[i]:
return False
return True
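# Example sketch of the matching semantics (values are illustrative):
#
#   fields_match('linux:release', 'linux:release:x64')   -> True
#   fields_match('linux:release', 'linux:release:x64',
#                allow_empty_fields=False)                -> False (lengths differ)
#   fields_match('linux:debug', 'linux:release')          -> False (field mismatch)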
def file_path_to_file_url(path):
"""Return a path as a file scheme url."""
if not path:
return ''
path = path.lstrip(WINDOWS_PREFIX_PATH)
# TODO(mbarbella): urljoin has several type checks for arguments. Ensure that
# we're passing newstr on both sides while migrating to Python 3. After
# migrating, ensure that callers pass strs to avoid this hack.
return urllib.parse.urljoin(
str(u'file:'), str(urllib.request.pathname2url(path)))
def filter_file_list(file_list):
"""Filters file list by removing duplicates, non-existent files
and directories."""
filtered_file_list = []
for file_path in file_list:
if not os.path.exists(file_path):
continue
if os.path.isdir(file_path):
continue
# Do a os specific case normalization before comparison.
if (os.path.normcase(file_path) in list(
map(os.path.normcase, filtered_file_list))):
continue
filtered_file_list.append(file_path)
if len(filtered_file_list) != len(file_list):
logs.log('Filtered file list (%s) from (%s).' % (str(filtered_file_list),
str(file_list)))
return filtered_file_list
def find_binary_path(app_directory, binary_file_subpath):
"""Find the path to a binary given the app directory and the file name.
This is necessary as cov files are created in the root app directory, and we
need a way to find the corresponding binary to symbolize addresses."""
binary_path = os.path.join(app_directory, binary_file_subpath)
if os.path.exists(binary_path):
# Common case: the binary exists in the root directory.
return binary_path
# Match the longest file sub-path suffix.
binary_file_subpath_with_sep = binary_file_subpath
if not binary_file_subpath_with_sep.startswith(os.sep):
binary_file_subpath_with_sep = os.sep + binary_file_subpath_with_sep
for root, _, filenames in os.walk(app_directory):
for filename in filenames:
file_path = os.path.join(root, filename)
if file_path.endswith(binary_file_subpath_with_sep):
return file_path
# Otherwise, do a search for the filename.
binary_filename = os.path.basename(binary_file_subpath)
for root, _, filenames in os.walk(app_directory):
for filename in filenames:
if filename == binary_filename:
file_path = os.path.join(root, filename)
return file_path
return None
def get_application_id():
"""Return application id. Code simplified based off original implementation in
AppEngine SDK get_identity.get_application_id."""
app_id = environment.get_value('APPLICATION_ID')
if app_id is None:
return None
psep = app_id.find('~')
if psep > 0:
app_id = app_id[psep + 1:]
return app_id
def service_account_email():
"""Get the service account name."""
# TODO(ochang): Detect GCE and return the GCE service account instead.
email_id = get_application_id()
if ':' in email_id:
domain, application_id = email_id.split(':')
email_id = application_id + '.' + domain
return email_id + '@appspot.gserviceaccount.com'
def get_bot_testcases_file_path(input_directory):
"""Returns path to bot-specific fuzzed testcases."""
# Using |FUZZ_INPUTS| prevents putting high load on nfs servers for cases
# when |input_directory| is a cloud storage data bundle. We can't rely
# on |FUZZ_INPUTS| always since it might not be available during local fuzzer
# testing, so use |input_directory| if it is not defined.
local_testcases_directory = environment.get_value('FUZZ_INPUTS')
bot_testcases_directory = (
local_testcases_directory
if local_testcases_directory else input_directory)
bot_name = environment.get_value('BOT_NAME')
bot_testcases_filename = '.%s_testcases' % bot_name
bot_testcases_file_path = os.path.join(bot_testcases_directory,
bot_testcases_filename)
return bot_testcases_file_path
def get_crash_stacktrace_output(application_command_line,
symbolized_stacktrace,
unsymbolized_stacktrace=None,
build_type=None):
"""Return output string with symbolized and unsymbolized stacktraces
combined."""
def _guess_build_type(application_command_line):
if 'stable' in application_command_line:
return 'stable'
elif 'beta' in application_command_line:
return 'beta'
elif sub_string_exists_in(['debug', 'dbg'], application_command_line):
return 'debug'
return 'release'
separator = '-' * 40
if not build_type:
build_type = _guess_build_type(application_command_line)
crash_stacktraces_output = environment.get_environment_settings_as_string()
if application_command_line:
crash_stacktraces_output += (
'[Command line] %s\n\n' % application_command_line)
crash_stacktraces_output += ('+%s%s Build Stacktrace%s+\n%s' % (
separator, build_type.capitalize(), separator, symbolized_stacktrace))
# No unsymbolized stack available. Bail out.
if not unsymbolized_stacktrace:
return crash_stacktraces_output
unsymbolized_stacktrace_diff = get_unique_lines_in_unsymbolized_stack(
symbolized_stacktrace, unsymbolized_stacktrace)
if unsymbolized_stacktrace_diff:
crash_stacktraces_output += (
'\n\n+%s%s Build Unsymbolized Stacktrace (diff)%s+\n\n%s' %
(separator, build_type.capitalize(), separator,
unsymbolized_stacktrace_diff))
return crash_stacktraces_output
def get_directory_hash_for_path(file_path):
"""Return the directory hash for a file path (excludes file name)."""
root_directory = environment.get_value('ROOT_DIR')
directory_path = os.path.dirname(file_path)
normalized_directory_path = remove_prefix(directory_path,
root_directory + os.sep)
normalized_directory_path = normalized_directory_path.replace('\\', '/')
return string_hash(normalized_directory_path)
def get_file_contents_with_fatal_error_on_failure(path):
"""Return the contents of the specified file, or None on error."""
try:
with open(path, 'rb') as file_handle:
data = file_handle.read()
return data
except IOError:
logs.log_error('Unable to read file `%s\'' % path)
raise errors.BadStateError
def get_line_seperator(label=''):
"""Return a line seperator with an optional label."""
seperator = '-' * 40
result = '\n\n%s%s%s\n\n' % (seperator, label, seperator)
return result
def get_normalized_relative_path(file_path, directory_path):
"""Return normalized relative path for file w.r.t to a directory."""
normalized_relative_file_path = remove_prefix(file_path,
directory_path + os.sep)
normalized_relative_file_path = (
normalized_relative_file_path.replace('\\', '/'))
return normalized_relative_file_path
def get_path_without_ext(path):
"""Return a path excluding the extension."""
return os.path.splitext(path)[0]
def get_process_ids(process_id, recursive=True):
"""Return list of pids for a process and its descendants."""
# Try to find the running process.
if not psutil.pid_exists(process_id):
return []
pids = [process_id]
try:
psutil_handle = psutil.Process(process_id)
children = psutil_handle.children(recursive=recursive)
for child in children:
pids.append(child.pid)
except psutil.NoSuchProcess:
# Avoid too much logging when the process already died.
return []
except (psutil.AccessDenied, OSError):
logs.log_warn('Failed to get process children.')
return []
return pids
def get_line_count_string(line_count):
"""Return string representation for size."""
if line_count == 0:
return 'empty'
elif line_count == 1:
return '1 line'
return '%d lines' % line_count
def get_size_string(size):
"""Return string representation for size."""
if size < 1 << 10:
return '%d B' % size
elif size < 1 << 20:
return '%d KB' % (size >> 10)
elif size < 1 << 30:
return '%d MB' % (size >> 20)
return '%d GB' % (size >> 30)
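# Examples of the thresholds above:
#
#   get_size_string(512)      -> '512 B'
#   get_size_string(2048)     -> '2 KB'
#   get_size_string(3 << 20)  -> '3 MB'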
def get_unique_lines_in_unsymbolized_stack(symbolized_stacktrace,
unsymbolized_stacktrace):
"""Return unique lines in unsymbolized stacktrace that are not in the
symbolized stacktrace."""
if symbolized_stacktrace == unsymbolized_stacktrace:
return ''
symbolized_stacktrace_lines = symbolized_stacktrace.splitlines()
unsymbolized_stacktrace_lines = unsymbolized_stacktrace.splitlines()
stripped_symbolized_stacktrace_lines = set()
for line in symbolized_stacktrace_lines:
stripped_symbolized_stacktrace_lines.add(line.strip())
index = 0
last_index = len(unsymbolized_stacktrace_lines) - 1
start = -1
end = -1
while index <= last_index:
if (unsymbolized_stacktrace_lines[index].strip() not in
stripped_symbolized_stacktrace_lines):
if start == -1:
start = index
end = index + 1
else:
end = index
index += 1
if start == -1:
# Nothing unique found, return empty string.
return ''
line_gap = 2
start = max(0, start - line_gap)
end = min(end + line_gap, last_index + 1)
result = '\n'.join(unsymbolized_stacktrace_lines[start:end])
return result
def indent_string(string, chars):
"""Indents a | |
m.c504 = Constraint(expr= - m.b19 + m.b23 - m.b87 <= 0)
m.c505 = Constraint(expr= - m.b19 + m.b25 - m.b88 <= 0)
m.c506 = Constraint(expr= - m.b21 + m.b23 - m.b89 <= 0)
m.c507 = Constraint(expr= - m.b21 + m.b25 - m.b90 <= 0)
m.c508 = Constraint(expr= - m.b23 + m.b25 - m.b91 <= 0)
m.c509 = Constraint(expr= - m.b26 + m.b27 - m.b37 <= 0)
m.c510 = Constraint(expr= - m.b26 + m.b28 - m.b38 <= 0)
m.c511 = Constraint(expr= - m.b26 + m.b29 - m.b39 <= 0)
m.c512 = Constraint(expr= - m.b26 + m.b30 - m.b40 <= 0)
m.c513 = Constraint(expr= - m.b26 + m.b31 - m.b41 <= 0)
m.c514 = Constraint(expr= - m.b26 + m.b32 - m.b42 <= 0)
m.c515 = Constraint(expr= - m.b26 + m.b33 - m.b43 <= 0)
m.c516 = Constraint(expr= - m.b26 + m.b34 - m.b44 <= 0)
m.c517 = Constraint(expr= - m.b26 + m.b35 - m.b45 <= 0)
m.c518 = Constraint(expr= - m.b26 + m.b36 - m.b46 <= 0)
m.c519 = Constraint(expr= - m.b27 + m.b28 - m.b47 <= 0)
m.c520 = Constraint(expr= - m.b27 + m.b29 - m.b48 <= 0)
m.c521 = Constraint(expr= - m.b27 + m.b30 - m.b49 <= 0)
m.c522 = Constraint(expr= - m.b27 + m.b31 - m.b50 <= 0)
m.c523 = Constraint(expr= - m.b27 + m.b32 - m.b51 <= 0)
m.c524 = Constraint(expr= - m.b27 + m.b33 - m.b52 <= 0)
m.c525 = Constraint(expr= - m.b27 + m.b34 - m.b53 <= 0)
m.c526 = Constraint(expr= - m.b27 + m.b35 - m.b54 <= 0)
m.c527 = Constraint(expr= - m.b27 + m.b36 - m.b55 <= 0)
m.c528 = Constraint(expr= - m.b28 + m.b29 - m.b56 <= 0)
m.c529 = Constraint(expr= - m.b28 + m.b30 - m.b57 <= 0)
m.c530 = Constraint(expr= - m.b28 + m.b31 - m.b58 <= 0)
m.c531 = Constraint(expr= - m.b28 + m.b32 - m.b59 <= 0)
m.c532 = Constraint(expr= - m.b28 + m.b33 - m.b60 <= 0)
m.c533 = Constraint(expr= - m.b28 + m.b34 - m.b61 <= 0)
m.c534 = Constraint(expr= - m.b28 + m.b35 - m.b62 <= 0)
m.c535 = Constraint(expr= - m.b28 + m.b36 - m.b63 <= 0)
m.c536 = Constraint(expr= - m.b29 + m.b30 - m.b64 <= 0)
m.c537 = Constraint(expr= - m.b29 + m.b31 - m.b65 <= 0)
m.c538 = Constraint(expr= - m.b29 + m.b32 - m.b66 <= 0)
m.c539 = Constraint(expr= - m.b29 + m.b33 - m.b67 <= 0)
m.c540 = Constraint(expr= - m.b29 + m.b34 - m.b68 <= 0)
m.c541 = Constraint(expr= - m.b29 + m.b35 - m.b69 <= 0)
m.c542 = Constraint(expr= - m.b29 + m.b36 - m.b70 <= 0)
m.c543 = Constraint(expr= - m.b30 + m.b31 - m.b71 <= 0)
m.c544 = Constraint(expr= - m.b30 + m.b32 - m.b72 <= 0)
m.c545 = Constraint(expr= - m.b30 + m.b33 - m.b73 <= 0)
m.c546 = Constraint(expr= - m.b30 + m.b34 - m.b74 <= 0)
m.c547 = Constraint(expr= - m.b30 + m.b35 - m.b75 <= 0)
m.c548 = Constraint(expr= - m.b30 + m.b36 - m.b76 <= 0)
m.c549 = Constraint(expr= - m.b31 + m.b32 - m.b77 <= 0)
m.c550 = Constraint(expr= - m.b31 + m.b33 - m.b78 <= 0)
m.c551 = Constraint(expr= - m.b31 + m.b34 - m.b79 <= 0)
m.c552 = Constraint(expr= - m.b31 + m.b35 - m.b80 <= 0)
m.c553 = Constraint(expr= - m.b31 + m.b36 - m.b81 <= 0)
m.c554 = Constraint(expr= - m.b32 + m.b33 - m.b82 <= 0)
m.c555 = Constraint(expr= - m.b32 + m.b34 - m.b83 <= 0)
m.c556 = Constraint(expr= - m.b32 + m.b35 - m.b84 <= 0)
m.c557 = Constraint(expr= - m.b32 + m.b36 - m.b85 <= 0)
m.c558 = Constraint(expr= - m.b33 + m.b34 - m.b86 <= 0)
m.c559 = Constraint(expr= - m.b33 + m.b35 - m.b87 <= 0)
m.c560 = Constraint(expr= - m.b33 + m.b36 - m.b88 <= 0)
m.c561 = Constraint(expr= - m.b34 + m.b35 - m.b89 <= 0)
m.c562 = Constraint(expr= - m.b34 + m.b36 - m.b90 <= 0)
m.c563 = Constraint(expr= - m.b35 + m.b36 - m.b91 <= 0)
m.c564 = Constraint(expr= - m.b37 + m.b38 - m.b47 <= 0)
m.c565 = Constraint(expr= - m.b37 + m.b39 - m.b48 <= 0)
m.c566 = Constraint(expr= - m.b37 + m.b40 - m.b49 <= 0)
m.c567 = Constraint(expr= - m.b37 + m.b41 - m.b50 <= 0)
m.c568 = Constraint(expr= - m.b37 + m.b42 - m.b51 <= 0)
m.c569 = Constraint(expr= - m.b37 + m.b43 - m.b52 <= 0)
m.c570 = Constraint(expr= - m.b37 + m.b44 - m.b53 <= 0)
m.c571 = Constraint(expr= - m.b37 + m.b45 - m.b54 <= 0)
m.c572 = Constraint(expr= - m.b37 + m.b46 - m.b55 <= 0)
m.c573 = Constraint(expr= - m.b38 + m.b39 - m.b56 <= 0)
m.c574 = Constraint(expr= - m.b38 + m.b40 - m.b57 <= 0)
m.c575 = Constraint(expr= - m.b38 + m.b41 - m.b58 <= 0)
m.c576 = Constraint(expr= - m.b38 + m.b42 - m.b59 <= 0)
m.c577 = Constraint(expr= - m.b38 + m.b43 - m.b60 <= 0)
m.c578 = Constraint(expr= - m.b38 + m.b44 - m.b61 <= 0)
m.c579 = Constraint(expr= - m.b38 + m.b45 - m.b62 <= 0)
m.c580 = Constraint(expr= - m.b38 + m.b46 - m.b63 <= 0)
m.c581 = Constraint(expr= - m.b39 + m.b40 - m.b64 <= 0)
m.c582 = Constraint(expr= - m.b39 + m.b41 - m.b65 <= 0)
m.c583 = Constraint(expr= - m.b39 + m.b42 - m.b66 <= 0)
m.c584 = Constraint(expr= - m.b39 + m.b43 - m.b67 <= 0)
m.c585 = Constraint(expr= - m.b39 + m.b44 - m.b68 <= 0)
m.c586 = Constraint(expr= - m.b39 + m.b45 - m.b69 <= 0)
m.c587 = Constraint(expr= - m.b39 + m.b46 - m.b70 <= 0)
m.c588 = Constraint(expr= - m.b40 + m.b41 - m.b71 <= 0)
m.c589 = Constraint(expr= - m.b40 + m.b42 - m.b72 <= 0)
m.c590 = Constraint(expr= - m.b40 + m.b43 - m.b73 <= 0)
m.c591 = Constraint(expr= - m.b40 + m.b44 - m.b74 <= 0)
m.c592 = Constraint(expr= - m.b40 + m.b45 - m.b75 <= 0)
m.c593 = Constraint(expr= - m.b40 + m.b46 - m.b76 <= 0)
m.c594 = Constraint(expr= - m.b41 + m.b42 - m.b77 <= 0)
m.c595 = Constraint(expr= - m.b41 + m.b43 - m.b78 <= 0)
m.c596 = Constraint(expr= - m.b41 + m.b44 - m.b79 <= 0)
m.c597 = Constraint(expr= - m.b41 + m.b45 - m.b80 <= 0)
m.c598 = Constraint(expr= - m.b41 + m.b46 - m.b81 <= 0)
m.c599 = Constraint(expr= - m.b42 + m.b43 - m.b82 <= 0)
m.c600 = Constraint(expr= - m.b42 + m.b44 - m.b83 <= 0)
m.c601 = Constraint(expr= - m.b42 + m.b45 - m.b84 <= 0)
m.c602 = Constraint(expr= - m.b42 + m.b46 - m.b85 <= 0)
m.c603 = Constraint(expr= - m.b43 + m.b44 - m.b86 <= 0)
m.c604 = Constraint(expr= - m.b43 + m.b45 - m.b87 <= 0)
m.c605 = Constraint(expr= - m.b43 + m.b46 - m.b88 <= 0)
m.c606 = Constraint(expr= - m.b44 + m.b45 - m.b89 <= 0)
m.c607 = Constraint(expr= - m.b44 + m.b46 - m.b90 <= 0)
m.c608 = Constraint(expr= - m.b45 + m.b46 - m.b91 <= 0)
m.c609 = Constraint(expr= - m.b47 + m.b48 - m.b56 <= 0)
m.c610 = Constraint(expr= - m.b47 + m.b49 - m.b57 <= 0)
m.c611 = Constraint(expr= - m.b47 + m.b50 - m.b58 <= 0)
m.c612 = Constraint(expr= - m.b47 + m.b51 - m.b59 <= 0)
m.c613 = Constraint(expr= - m.b47 + m.b52 - m.b60 <= 0)
m.c614 = Constraint(expr= - m.b47 + m.b53 - m.b61 <= 0)
m.c615 = Constraint(expr= - m.b47 + m.b54 - m.b62 <= 0)
m.c616 = Constraint(expr= - m.b47 + m.b55 - m.b63 <= 0)
m.c617 = Constraint(expr= - m.b48 + m.b49 - m.b64 <= 0)
m.c618 = Constraint(expr= - m.b48 + m.b50 - m.b65 <= 0)
m.c619 = Constraint(expr= - m.b48 + m.b51 - m.b66 <= 0)
m.c620 = Constraint(expr= - m.b48 + m.b52 - m.b67 <= 0)
m.c621 = Constraint(expr= - m.b48 + m.b53 - m.b68 <= 0)
m.c622 = Constraint(expr= - m.b48 + m.b54 - m.b69 <= 0)
m.c623 = Constraint(expr= - m.b48 + m.b55 - m.b70 <= 0)
m.c624 = Constraint(expr= - m.b49 + m.b50 - m.b71 <= 0)
m.c625 = Constraint(expr= - m.b49 + m.b51 - m.b72 <= 0)
m.c626 = Constraint(expr= - m.b49 + m.b52 - m.b73 <= 0)
m.c627 = Constraint(expr= - m.b49 + m.b53 - m.b74 <= 0)
m.c628 = Constraint(expr= - m.b49 + m.b54 - m.b75 <= 0)
m.c629 = Constraint(expr= - m.b49 + m.b55 - m.b76 <= 0)
m.c630 = Constraint(expr= - m.b50 + m.b51 - m.b77 <= 0)
m.c631 = Constraint(expr= - m.b50 + m.b52 - m.b78 <= 0)
m.c632 = Constraint(expr= - m.b50 + m.b53 - m.b79 <= 0)
import numpy as np
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import collections
from src.utils.constants import PAD
from src.data.pytorch_datasets import pitch_to_ix, ks_to_ix
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#print("Device: " + str(device))
#https://github.com/sooftware/attentions/blob/master/attentions.py
class AdditiveAttention(nn.Module):
"""
Applies an additive attention (Bahdanau) mechanism on the output features from the decoder.
Additive attention proposed in "Neural Machine Translation by Jointly Learning to Align and Translate" paper.
Args:
hidden_dim (int): dimension of the hidden state vector
Inputs: query, value
- **query** (batch_size, q_len, hidden_dim): tensor containing the output features from the decoder.
- **value** (batch_size, v_len, hidden_dim): tensor containing features of the encoded input sequence.
Returns: context, attn
- **context**: tensor containing the context vector from attention mechanism.
- **attn**: tensor containing the alignment from the encoder outputs.
Reference:
- **Neural Machine Translation by Jointly Learning to Align and Translate**: https://arxiv.org/abs/1409.0473
"""
def __init__(self, hidden_dim: int) -> None:
super(AdditiveAttention, self).__init__()
self.query_proj = nn.Linear(hidden_dim, hidden_dim, bias=False)
self.key_proj = nn.Linear(hidden_dim, hidden_dim, bias=False)
self.bias = nn.Parameter(torch.rand(hidden_dim).uniform_(-0.1, 0.1))
self.score_proj = nn.Linear(hidden_dim, 1)
def forward(self, query: Tensor, key: Tensor, value: Tensor):# -> Tuple[Tensor, Tensor]:
score = self.score_proj(torch.tanh(self.key_proj(key) + self.query_proj(query) + self.bias)).squeeze(-1)
attn = F.softmax(score, dim=-1)
context = torch.bmm(attn.unsqueeze(1), value)
context = torch.sum(context, dim=1, keepdim=True)
return context, attn
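# Minimal usage sketch (shapes chosen for illustration only):
#
#   attn = AdditiveAttention(hidden_dim=8)
#   q = torch.rand(4, 1, 8)    # (batch, q_len=1, hidden)
#   kv = torch.rand(4, 10, 8)  # (batch, v_len, hidden)
#   context, weights = attn(q, kv, kv)
#   # context: (4, 1, 8), weights: (4, 10)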
class DotProductAttention(nn.Module):
"""
Compute the dot products of the query with all values and apply a softmax function to obtain the weights on the values
"""
def __init__(self, hidden_dim):
super(DotProductAttention, self).__init__()
self.normalize = nn.LayerNorm(hidden_dim)
self.query_proj = nn.Linear(hidden_dim, hidden_dim, bias=True) #False?
self.value_proj = nn.Linear(hidden_dim, hidden_dim, bias=True)
def forward(self, query: Tensor, value: Tensor): #-> Tuple[Tensor, Tensor]:
batch_size, hidden_dim, input_size = query.size(0), query.size(2), value.size(1)
score = torch.bmm(self.query_proj(query), self.value_proj(value).transpose(1, 2))
attn = F.softmax(score.view(-1, input_size), dim=1).view(batch_size, -1, input_size)
context = torch.sum(torch.bmm(attn, value), dim=1, keepdim=True)
return context, attn
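# Usage sketch, mirroring the additive variant above (illustrative shapes):
#
#   attn = DotProductAttention(hidden_dim=8)
#   q = torch.rand(4, 5, 8)
#   v = torch.rand(4, 10, 8)
#   context, weights = attn(q, v)   # context: (4, 1, 8), weights: (4, 5, 10)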
class DotProductAttention_nosum(nn.Module):
"""
Compute the dot products of the query with all values and apply a softmax function to obtain the weights on the values
"""
def __init__(self, hidden_dim):
super(DotProductAttention_nosum, self).__init__()
self.normalize = nn.LayerNorm(hidden_dim)
self.query_proj = nn.Linear(hidden_dim, hidden_dim, bias=True) #False?
self.value_proj = nn.Linear(hidden_dim, hidden_dim, bias=True)
self.out_proj = nn.Linear(hidden_dim, hidden_dim, bias=True)
def forward(self, query: Tensor, value: Tensor): #-> Tuple[Tensor, Tensor]:
batch_size, hidden_dim, input_size = query.size(0), query.size(2), value.size(1)
score = torch.bmm(self.query_proj(query), self.value_proj(value).transpose(1, 2))
attn = F.softmax(score.view(-1, input_size), dim=1).view(batch_size, -1, input_size)
context = torch.bmm(attn, self.out_proj(value))
return context, attn
class PKSpellHierarchical_app1(nn.Module):
def __init__(
self,
input_dim=17,
hidden_dim=300,
pitch_to_ix=pitch_to_ix,
ks_to_ix=ks_to_ix,
hidden_dim2=24,
rnn_depth=1,
dropout=None,
dropout2=None,
cell_type="GRU",
bidirectional=True,
mode="both",
):
super(PKSpellHierarchical_app1, self).__init__()
self.n_out_pitch = len(pitch_to_ix)
self.n_out_ks = len(ks_to_ix)
if hidden_dim % 2 != 0:
raise ValueError("Hidden_dim must be an even integer")
if hidden_dim2 % 2 != 0:
raise ValueError("Hidden_dim2 must be an even integer")
self.hidden_dim = hidden_dim
self.hidden_dim2 = hidden_dim2
if cell_type == "GRU":
rnn_cell = nn.GRU
elif cell_type == "LSTM":
rnn_cell = nn.LSTM
else:
raise ValueError(f"Unknown RNN cell type: {cell_type}")
# RNN layer.
self.rnn = rnn_cell(
input_size=input_dim,
hidden_size=hidden_dim // 2 if bidirectional else hidden_dim,
bidirectional=bidirectional,
num_layers=rnn_depth,
)
self.hier_hidden = 256
self.hier_rnn = rnn_cell(
input_size=hidden_dim,
hidden_size=self.hier_hidden//2,
bidirectional=True,
num_layers=1,
)
self.att_layer = DotProductAttention(self.hier_hidden)
if dropout is not None and dropout > 0:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = None
if dropout2 is not None and dropout2 > 0:
self.dropout2 = nn.Dropout(p=dropout2)
else:
self.dropout2 = None
# Output layers.
self.top_layer_pitch = nn.Linear(hidden_dim+self.hier_hidden, self.n_out_pitch)
self.top_layer_ks = nn.Linear(self.hier_hidden, self.n_out_ks)
# Loss function that we will use during training.
self.loss_pitch = nn.CrossEntropyLoss(
reduction="mean", ignore_index=pitch_to_ix[PAD]
)
self.loss_ks = nn.CrossEntropyLoss(reduction="mean", ignore_index=ks_to_ix[PAD])
self.mode = mode
def compute_outputs(self, sentences, sentences_len, eoM):
sentences = nn.utils.rnn.pack_padded_sequence(sentences, sentences_len)
rnn_out, _ = self.rnn(sentences)
rnn_out, _ = nn.utils.rnn.pad_packed_sequence(rnn_out)
if self.dropout is not None:
rnn_out = self.dropout(rnn_out)
context_list = []
for i, s, eom, l in zip(range(len(sentences_len)), torch.transpose(rnn_out,0,1),torch.transpose(eoM,0,1),sentences_len):
nz = torch.nonzero(eom).squeeze()
lengths = torch.diff(nz.to(device),prepend=torch.tensor([-1]).to(device))
sentences_split = torch.tensor_split(s[:l.int()], nz.cpu())
sentences_split_pad = nn.utils.rnn.pad_sequence(sentences_split,batch_first=False)
packed = nn.utils.rnn.pack_padded_sequence(sentences_split_pad, lengths.cpu(),enforce_sorted=False)
rnn_o, h_n = self.hier_rnn(packed)
rnn_o, _ = nn.utils.rnn.pad_packed_sequence(rnn_o)
attn_output, attn_output_weights = self.att_layer(torch.transpose(rnn_o,0,1),torch.transpose(rnn_o,0,1))
context = attn_output.squeeze()
context = torch.repeat_interleave(context, lengths.int(), dim=0)
context_list.append(context)
out_context = nn.utils.rnn.pad_sequence(context_list,batch_first=True)
stacked = torch.cat((rnn_out,torch.transpose(out_context,0,1)),dim=2)
out_pitch = self.top_layer_pitch(stacked)
out_ks = self.top_layer_ks(torch.transpose(out_context,0,1))
return out_pitch, out_ks
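# Reading of compute_outputs above (an interpretation, not the authors' own
# description): the note-level GRU output is split at the nonzero positions of
# eoM (presumably end-of-segment markers), a second GRU plus dot-product
# attention summarises each segment into one context vector, and that vector is
# repeated over the notes of its segment (torch.repeat_interleave) before being
# concatenated with the note-level features for the pitch head, while the
# key-signature head only sees the segment-level context.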
def forward(self, sentences, pitches, keysignatures, sentences_len, eoM):
# First computes the predictions, and then the loss function.
# Compute the outputs. The shape is (max_len, n_sentences, n_labels).
scores_pitch, scores_ks = self.compute_outputs(sentences, sentences_len, eoM)
# Flatten the outputs and the gold-standard labels, to compute the loss.
# The input to this loss needs to be one 2-dimensional and one 1-dimensional tensor.
scores_pitch = scores_pitch.view(-1, self.n_out_pitch)
scores_ks = scores_ks.view(-1, self.n_out_ks)
pitches = pitches.view(-1)
keysignatures = keysignatures.view(-1)
if self.mode == "both":
loss = self.loss_pitch(scores_pitch, pitches) + self.loss_ks(
scores_ks, keysignatures
)
elif self.mode == "ks":
loss = self.loss_ks(scores_ks, keysignatures)
elif self.mode == "ps":
loss = self.loss_pitch(scores_pitch, pitches)
return loss
def predict(self, sentences, sentences_len, eoM):
# Compute the outputs from the linear units.
scores_pitch, scores_ks = self.compute_outputs(sentences, sentences_len, eoM)
# Select the top-scoring labels. The shape is now (max_len, n_sentences).
predicted_pitch = scores_pitch.argmax(dim=2)
predicted_ks = scores_ks.argmax(dim=2)
return (
[
predicted_pitch[: int(l), i].cpu().numpy()
for i, l in enumerate(sentences_len)
],
[
predicted_ks[: int(l), i].cpu().numpy()
for i, l in enumerate(sentences_len)
],
)
class PKSpellHierarchical_app2(nn.Module):
def __init__(
self,
input_dim=17,
hidden_dim=300,
pitch_to_ix=pitch_to_ix,
ks_to_ix=ks_to_ix,
hidden_dim2=24,
rnn_depth=1,
dropout=None,
dropout2=None,
cell_type="GRU",
bidirectional=True,
mode="both",
):
super(PKSpellHierarchical_app2, self).__init__()
self.n_out_pitch = len(pitch_to_ix)
self.n_out_ks = len(ks_to_ix)
if hidden_dim % 2 != 0:
raise ValueError("Hidden_dim must be an even integer")
if hidden_dim2 % 2 != 0:
raise ValueError("Hidden_dim2 must be an even integer")
self.hidden_dim = hidden_dim
self.hidden_dim2 = hidden_dim2
if cell_type == "GRU":
rnn_cell = nn.GRU
elif cell_type == "LSTM":
rnn_cell = nn.LSTM
else:
raise ValueError(f"Unknown RNN cell type: {cell_type}")
# RNN layer.
self.rnn = rnn_cell(
input_size=input_dim,
hidden_size=hidden_dim // 2 if bidirectional else hidden_dim,
bidirectional=bidirectional,
num_layers=rnn_depth,
)
self.hier_hidden = 256
self.hier_rnn = rnn_cell(
input_size=input_dim,
hidden_size=self.hier_hidden//2,
bidirectional=True,
num_layers=1,
)
self.att_layer = DotProductAttention(self.hier_hidden)
if dropout is not None and dropout > 0:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = None
if dropout2 is not None and dropout2 > 0:
self.dropout2 = nn.Dropout(p=dropout2)
else:
self.dropout2 = None
# Output layers.
self.top_layer_pitch = nn.Linear(hidden_dim+self.hier_hidden, self.n_out_pitch)
self.top_layer_ks = nn.Linear(self.hier_hidden, self.n_out_ks)
# Loss function that we will use during training.
self.loss_pitch = nn.CrossEntropyLoss(
reduction="mean", ignore_index=pitch_to_ix[PAD]
)
self.loss_ks = nn.CrossEntropyLoss(reduction="mean", ignore_index=ks_to_ix[PAD])
self.mode = mode
def compute_outputs(self, sentences, sentences_len, eoM):
context_list = []
for i, s, eom, l in zip(range(len(sentences_len)), torch.transpose(sentences,0,1),torch.transpose(eoM,0,1),sentences_len):
nz = torch.nonzero(eom).squeeze()
lengths = torch.diff(nz.to(device),prepend=torch.tensor([-1]).to(device))
sentences_split = torch.tensor_split(s[:l.int()], nz.cpu())
sentences_split_pad = nn.utils.rnn.pad_sequence(sentences_split,batch_first=False)
packed = nn.utils.rnn.pack_padded_sequence(sentences_split_pad, lengths.cpu(),enforce_sorted=False)
rnn_o, h_n = self.hier_rnn(packed)
rnn_o, _ = nn.utils.rnn.pad_packed_sequence(rnn_o)
attn_output, attn_output_weights = self.att_layer(torch.transpose(rnn_o,0,1),torch.transpose(rnn_o,0,1))
context = attn_output.squeeze()
context = torch.repeat_interleave(context, lengths.int(), dim=0)
context_list.append(context)
out_context = nn.utils.rnn.pad_sequence(context_list,batch_first=True)
sentences = nn.utils.rnn.pack_padded_sequence(sentences, sentences_len)
rnn_out, _ = self.rnn(sentences)
rnn_out, _ = nn.utils.rnn.pad_packed_sequence(rnn_out)
if self.dropout is not None:
rnn_out = self.dropout(rnn_out)
stacked = torch.cat((rnn_out,torch.transpose(out_context,0,1)),dim=2)
out_pitch = self.top_layer_pitch(stacked)
out_ks = self.top_layer_ks(torch.transpose(out_context,0,1))
return out_pitch, out_ks
def forward(self, sentences, pitches, keysignatures, sentences_len, eoM):
# First computes the predictions, and then the loss function.
# Compute the outputs. The shape is (max_len, n_sentences, n_labels).
scores_pitch, scores_ks = self.compute_outputs(sentences, sentences_len, eoM)
# Flatten the outputs and the gold-standard labels, to compute the loss.
# The input to this loss needs to be one 2-dimensional and one 1-dimensional tensor.
scores_pitch = scores_pitch.view(-1, self.n_out_pitch)
scores_ks = scores_ks.view(-1, self.n_out_ks)
pitches = pitches.view(-1)
keysignatures = keysignatures.view(-1)
if self.mode == "both":
loss = self.loss_pitch(scores_pitch, pitches) + self.loss_ks(
scores_ks, keysignatures
)
elif self.mode == "ks":
loss = self.loss_ks(scores_ks, keysignatures)
elif self.mode == "ps":
loss = self.loss_pitch(scores_pitch, pitches)
return loss
def predict(self, sentences, sentences_len, eoM):
# Compute the outputs from the linear units.
scores_pitch, scores_ks = self.compute_outputs(sentences, sentences_len, eoM)
# Select the top-scoring labels. The shape is now (max_len, n_sentences).
predicted_pitch = scores_pitch.argmax(dim=2)
predicted_ks = scores_ks.argmax(dim=2)
return (
[
predicted_pitch[: int(l), i].cpu().numpy()
for i, l in enumerate(sentences_len)
],
[
predicted_ks[: int(l), i].cpu().numpy()
for i, l in enumerate(sentences_len)
],
)
class PKSpellHierarchical_app3(nn.Module):
def __init__(
self,
input_dim=17,
hidden_dim=300,
pitch_to_ix=pitch_to_ix,
ks_to_ix=ks_to_ix,
hidden_dim2=24,
rnn_depth=1,
dropout=None,
dropout2=None,
cell_type="GRU",
bidirectional=True,
mode="both",
):
super(PKSpellHierarchical_app3, self).__init__()
self.n_out_pitch = len(pitch_to_ix)
self.n_out_ks = len(ks_to_ix)
if hidden_dim % 2 != 0:
raise ValueError("Hidden_dim must be an even integer")
if hidden_dim2 % 2 != 0:
raise ValueError("Hidden_dim2 must be an even integer")
self.hidden_dim = hidden_dim
self.hidden_dim2 = hidden_dim2
if cell_type == "GRU":
rnn_cell = nn.GRU
@pulumi.getter
def version(self) -> pulumi.Input[str]:
"""
Version of MongoDB (either 5.0, 4.4, 4.2 or 4.0).
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: pulumi.Input[str]):
pulumi.set(self, "version", value)
@property
@pulumi.getter
def access(self) -> Optional[pulumi.Input['MdbMongodbClusterClusterConfigAccessArgs']]:
"""
Shows whether cluster has access to data lens. The structure is documented below.
"""
return pulumi.get(self, "access")
@access.setter
def access(self, value: Optional[pulumi.Input['MdbMongodbClusterClusterConfigAccessArgs']]):
pulumi.set(self, "access", value)
@property
@pulumi.getter(name="backupWindowStart")
def backup_window_start(self) -> Optional[pulumi.Input['MdbMongodbClusterClusterConfigBackupWindowStartArgs']]:
"""
Time to start the daily backup, in the UTC timezone. The structure is documented below.
"""
return pulumi.get(self, "backup_window_start")
@backup_window_start.setter
def backup_window_start(self, value: Optional[pulumi.Input['MdbMongodbClusterClusterConfigBackupWindowStartArgs']]):
pulumi.set(self, "backup_window_start", value)
@property
@pulumi.getter(name="featureCompatibilityVersion")
def feature_compatibility_version(self) -> Optional[pulumi.Input[str]]:
"""
Feature compatibility version of MongoDB. If not provided, the value of `version` is used. Can be either `5.0`, `4.4`, `4.2` or `4.0`.
"""
return pulumi.get(self, "feature_compatibility_version")
@feature_compatibility_version.setter
def feature_compatibility_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "feature_compatibility_version", value)
@pulumi.input_type
class MdbMongodbClusterClusterConfigAccessArgs:
def __init__(__self__, *,
data_lens: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[bool] data_lens: Allow access for DataLens.
"""
if data_lens is not None:
pulumi.set(__self__, "data_lens", data_lens)
@property
@pulumi.getter(name="dataLens")
def data_lens(self) -> Optional[pulumi.Input[bool]]:
"""
Allow access for DataLens.
"""
return pulumi.get(self, "data_lens")
@data_lens.setter
def data_lens(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "data_lens", value)
@pulumi.input_type
class MdbMongodbClusterClusterConfigBackupWindowStartArgs:
def __init__(__self__, *,
hours: Optional[pulumi.Input[int]] = None,
minutes: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[int] hours: The hour at which backup will be started.
:param pulumi.Input[int] minutes: The minute at which backup will be started.
"""
if hours is not None:
pulumi.set(__self__, "hours", hours)
if minutes is not None:
pulumi.set(__self__, "minutes", minutes)
@property
@pulumi.getter
def hours(self) -> Optional[pulumi.Input[int]]:
"""
The hour at which backup will be started.
"""
return pulumi.get(self, "hours")
@hours.setter
def hours(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "hours", value)
@property
@pulumi.getter
def minutes(self) -> Optional[pulumi.Input[int]]:
"""
The minute at which backup will be started.
"""
return pulumi.get(self, "minutes")
@minutes.setter
def minutes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "minutes", value)
@pulumi.input_type
class MdbMongodbClusterDatabaseArgs:
def __init__(__self__, *,
name: pulumi.Input[str]):
"""
:param pulumi.Input[str] name: The fully qualified domain name of the host. Computed on server side.
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The fully qualified domain name of the host. Computed on server side.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class MdbMongodbClusterHostArgs:
def __init__(__self__, *,
subnet_id: pulumi.Input[str],
zone_id: pulumi.Input[str],
assign_public_ip: Optional[pulumi.Input[bool]] = None,
health: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
shard_name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] subnet_id: The ID of the subnet, to which the host belongs. The subnet must
be a part of the network to which the cluster belongs.
:param pulumi.Input[str] zone_id: The availability zone where the MongoDB host will be created.
For more information see [the official documentation](https://cloud.yandex.com/docs/overview/concepts/geo-scope).
:param pulumi.Input[bool] assign_public_ip: (Optional) Should this host have a public IP assigned. Can be either `true` or `false`.
:param pulumi.Input[str] health: The health of the host.
:param pulumi.Input[str] name: The fully qualified domain name of the host. Computed on server side.
:param pulumi.Input[str] role: The role of the cluster (either PRIMARY or SECONDARY).
:param pulumi.Input[str] shard_name: The name of the shard to which the host belongs.
:param pulumi.Input[str] type: Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`. A day and hour of window need to be specified with weekly window.
"""
pulumi.set(__self__, "subnet_id", subnet_id)
pulumi.set(__self__, "zone_id", zone_id)
if assign_public_ip is not None:
pulumi.set(__self__, "assign_public_ip", assign_public_ip)
if health is not None:
pulumi.set(__self__, "health", health)
if name is not None:
pulumi.set(__self__, "name", name)
if role is not None:
pulumi.set(__self__, "role", role)
if shard_name is not None:
pulumi.set(__self__, "shard_name", shard_name)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Input[str]:
"""
The ID of the subnet, to which the host belongs. The subnet must
be a part of the network to which the cluster belongs.
"""
return pulumi.get(self, "subnet_id")
@subnet_id.setter
def subnet_id(self, value: pulumi.Input[str]):
pulumi.set(self, "subnet_id", value)
@property
@pulumi.getter(name="zoneId")
def zone_id(self) -> pulumi.Input[str]:
"""
The availability zone where the MongoDB host will be created.
For more information see [the official documentation](https://cloud.yandex.com/docs/overview/concepts/geo-scope).
"""
return pulumi.get(self, "zone_id")
@zone_id.setter
def zone_id(self, value: pulumi.Input[str]):
pulumi.set(self, "zone_id", value)
@property
@pulumi.getter(name="assignPublicIp")
def assign_public_ip(self) -> Optional[pulumi.Input[bool]]:
"""
(Optional) Should this host have a public IP assigned. Can be either `true` or `false`.
"""
return pulumi.get(self, "assign_public_ip")
@assign_public_ip.setter
def assign_public_ip(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "assign_public_ip", value)
@property
@pulumi.getter
def health(self) -> Optional[pulumi.Input[str]]:
"""
The health of the host.
"""
return pulumi.get(self, "health")
@health.setter
def health(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "health", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The fully qualified domain name of the host. Computed on server side.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
The role of the cluster (either PRIMARY or SECONDARY).
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter(name="shardName")
def shard_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the shard to which the host belongs.
"""
return pulumi.get(self, "shard_name")
@shard_name.setter
def shard_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "shard_name", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`. A day and hour of window need to be specified with weekly window.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class MdbMongodbClusterMaintenanceWindowArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
day: Optional[pulumi.Input[str]] = None,
hour: Optional[pulumi.Input[int]] = None):
"""
        :param pulumi.Input[str] type: Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`. Day and hour of the window must be specified when the window type is `WEEKLY`.
        :param pulumi.Input[str] day: Day of the week for the maintenance window if the window type is weekly. Possible values: `MON`, `TUE`, `WED`, `THU`, `FRI`, `SAT`, `SUN`.
        :param pulumi.Input[int] hour: Hour of the day in UTC (1-24) for the maintenance window if the window type is weekly.
"""
pulumi.set(__self__, "type", type)
if day is not None:
pulumi.set(__self__, "day", day)
if hour is not None:
pulumi.set(__self__, "hour", hour)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
        Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`. Day and hour of the window must be specified when the window type is `WEEKLY`.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def day(self) -> Optional[pulumi.Input[str]]:
"""
        Day of the week for the maintenance window if the window type is weekly. Possible values: `MON`, `TUE`, `WED`, `THU`, `FRI`, `SAT`, `SUN`.
"""
return pulumi.get(self, "day")
@day.setter
def day(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "day", value)
@property
@pulumi.getter
def hour(self) -> Optional[pulumi.Input[int]]:
"""
        Hour of the day in UTC (1-24) for the maintenance window if the window type is weekly.
"""
return pulumi.get(self, "hour")
@hour.setter
def hour(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "hour", value)
@pulumi.input_type
class MdbMongodbClusterResourcesArgs:
def __init__(__self__, *,
disk_size: pulumi.Input[int],
disk_type_id: pulumi.Input[str],
resource_preset_id: pulumi.Input[str]):
"""
:param pulumi.Input[int] disk_size: Volume of the storage available to a MongoDB host, in gigabytes.
:param pulumi.Input[str] disk_type_id: Type of the storage of MongoDB hosts.
For more information see [the official documentation](https://cloud.yandex.com/docs/managed-clickhouse/concepts/storage).
"""
pulumi.set(__self__, "disk_size", disk_size)
pulumi.set(__self__, "disk_type_id", disk_type_id)
pulumi.set(__self__, "resource_preset_id", resource_preset_id)
@property
@pulumi.getter(name="diskSize")
def disk_size(self) -> pulumi.Input[int]:
"""
Volume of the storage available to a MongoDB host, in gigabytes.
"""
return pulumi.get(self, "disk_size")
@disk_size.setter
def disk_size(self, value: pulumi.Input[int]):
pulumi.set(self, "disk_size", value)
@property
@pulumi.getter(name="diskTypeId")
def disk_type_id(self) -> pulumi.Input[str]:
"""
Type of the storage of MongoDB hosts.
For more information see [the official documentation](https://cloud.yandex.com/docs/managed-clickhouse/concepts/storage).
"""
return pulumi.get(self, "disk_type_id")
@disk_type_id.setter
def disk_type_id(self, value: pulumi.Input[str]):
pulumi.set(self, "disk_type_id", value)
@property
@pulumi.getter(name="resourcePresetId")
def resource_preset_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_preset_id")
@resource_preset_id.setter
def resource_preset_id(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_preset_id", value)
@pulumi.input_type
class MdbMongodbClusterUserArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
password: pulumi.Input[str],
permissions: Optional[pulumi.Input[Sequence[pulumi.Input['MdbMongodbClusterUserPermissionArgs']]]] = None):
"""
        :param pulumi.Input[str] name: The name of the user.
        :param pulumi.Input[str] password: The password of the user.
:param pulumi.Input[Sequence[pulumi.Input['MdbMongodbClusterUserPermissionArgs']]] permissions: Set of permissions granted to the user. The structure is documented below.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "password", password)
if permissions is not None:
pulumi.set(__self__, "permissions", permissions)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
        The name of the user.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
| |
1.00 22.98 C
ATOM 2161 C ARG A 315 6.921 -38.651 108.687 1.00 22.60 C
ATOM 2162 O ARG A 315 7.532 -38.196 109.653 1.00 22.85 O
ATOM 2163 CB ARG A 315 5.173 -37.516 107.323 1.00 23.23 C
ATOM 2164 CG ARG A 315 4.735 -36.167 107.842 1.00 24.91 C
ATOM 2165 CD ARG A 315 3.231 -36.143 108.070 1.00 28.73 C
ATOM 2166 NE ARG A 315 2.444 -36.339 106.853 1.00 31.17 N
ATOM 2167 CZ ARG A 315 2.358 -35.454 105.863 1.00 34.22 C
ATOM 2168 NH1 ARG A 315 3.018 -34.301 105.937 1.00 36.07 N
ATOM 2169 NH2 ARG A 315 1.596 -35.715 104.805 1.00 32.99 N
ATOM 2170 N ARG A 316 6.447 -39.893 108.657 1.00 22.00 N
ATOM 2171 CA ARG A 316 6.578 -40.806 109.792 1.00 21.99 C
ATOM 2172 C ARG A 316 7.984 -41.091 110.302 1.00 23.09 C
ATOM 2173 O ARG A 316 8.149 -41.529 111.439 1.00 23.65 O
ATOM 2174 CB ARG A 316 5.886 -42.136 109.475 1.00 21.22 C
ATOM 2175 CG ARG A 316 4.373 -42.042 109.402 1.00 23.38 C
ATOM 2176 CD ARG A 316 3.836 -42.668 108.123 1.00 26.73 C
ATOM 2177 NE ARG A 316 3.995 -44.121 108.085 1.00 28.42 N
ATOM 2178 CZ ARG A 316 4.479 -44.789 107.040 1.00 28.61 C
ATOM 2179 NH1 ARG A 316 4.857 -44.132 105.954 1.00 29.34 N
ATOM 2180 NH2 ARG A 316 4.571 -46.113 107.074 1.00 27.85 N
ATOM 2181 N ASP A 317 9.000 -40.846 109.485 1.00 25.17 N
ATOM 2182 CA ASP A 317 10.363 -41.127 109.913 1.00 26.92 C
ATOM 2183 C ASP A 317 11.076 -39.946 110.566 1.00 26.95 C
ATOM 2184 O ASP A 317 12.094 -40.123 111.234 1.00 27.91 O
ATOM 2185 CB ASP A 317 11.184 -41.652 108.733 1.00 31.20 C
ATOM 2186 CG ASP A 317 12.583 -42.075 109.139 1.00 35.93 C
ATOM 2187 OD1 ASP A 317 13.435 -41.187 109.360 1.00 40.17 O
ATOM 2188 OD2 ASP A 317 12.831 -43.295 109.250 1.00 37.90 O
ATOM 2189 N LEU A 318 10.550 -38.741 110.378 1.00 26.49 N
ATOM 2190 CA LEU A 318 11.155 -37.560 110.985 1.00 26.24 C
ATOM 2191 C LEU A 318 10.717 -37.537 112.448 1.00 26.95 C
ATOM 2192 O LEU A 318 9.800 -36.810 112.828 1.00 27.99 O
ATOM 2193 CB LEU A 318 10.693 -36.301 110.252 1.00 24.34 C
ATOM 2194 CG LEU A 318 11.071 -36.288 108.768 1.00 23.72 C
ATOM 2195 CD1 LEU A 318 10.394 -35.132 108.065 1.00 24.49 C
ATOM 2196 CD2 LEU A 318 12.580 -36.193 108.629 1.00 24.02 C
ATOM 2197 N THR A 319 11.392 -38.343 113.259 1.00 27.73 N
ATOM 2198 CA THR A 319 11.076 -38.486 114.676 1.00 27.93 C
ATOM 2199 C THR A 319 11.764 -37.518 115.637 1.00 28.67 C
ATOM 2200 O THR A 319 11.364 -37.407 116.795 1.00 29.01 O
ATOM 2201 CB THR A 319 11.379 -39.919 115.137 1.00 27.93 C
ATOM 2202 OG1 THR A 319 12.746 -40.235 114.841 1.00 27.70 O
ATOM 2203 CG2 THR A 319 10.472 -40.911 114.417 1.00 26.51 C
ATOM 2204 N THR A 320 12.797 -36.826 115.169 1.00 29.46 N
ATOM 2205 CA THR A 320 13.513 -35.872 116.011 1.00 29.47 C
ATOM 2206 C THR A 320 12.739 -34.564 116.150 1.00 30.76 C
ATOM 2207 O THR A 320 12.574 -34.041 117.251 1.00 31.05 O
ATOM 2208 CB THR A 320 14.900 -35.571 115.435 1.00 29.84 C
ATOM 2209 OG1 THR A 320 15.722 -36.738 115.556 1.00 28.30 O
ATOM 2210 CG2 THR A 320 15.548 -34.398 116.168 1.00 30.39 C
ATOM 2211 N SER A 321 12.274 -34.035 115.023 1.00 31.46 N
ATOM 2212 CA SER A 321 11.505 -32.799 115.014 1.00 31.46 C
ATOM 2213 C SER A 321 10.184 -33.067 114.316 1.00 30.56 C
ATOM 2214 O SER A 321 10.104 -33.934 113.448 1.00 32.00 O
ATOM 2215 CB SER A 321 12.270 -31.700 114.277 1.00 33.39 C
ATOM 2216 OG SER A 321 13.478 -31.389 114.950 1.00 37.51 O
ATOM 2217 N VAL A 322 9.149 -32.325 114.694 1.00 28.32 N
ATOM 2218 CA VAL A 322 7.829 -32.503 114.103 1.00 25.27 C
ATOM 2219 C VAL A 322 7.760 -31.974 112.671 1.00 24.50 C
ATOM 2220 O VAL A 322 8.001 -30.794 112.424 1.00 24.31 O
ATOM 2221 CB VAL A 322 6.756 -31.795 114.945 1.00 24.16 C
ATOM 2222 CG1 VAL A 322 5.380 -32.058 114.362 1.00 24.15 C
ATOM 2223 CG2 VAL A 322 6.830 -32.275 116.380 1.00 23.55 C
ATOM 2224 N PRO A 323 7.429 -32.850 111.708 1.00 23.60 N
ATOM 2225 CA PRO A 323 7.325 -32.477 110.294 1.00 22.62 C
ATOM 2226 C PRO A 323 6.166 -31.512 110.044 1.00 22.04 C
ATOM 2227 O PRO A 323 5.076 -31.683 110.594 1.00 21.04 O
ATOM 2228 CB PRO A 323 7.097 -33.816 109.596 1.00 22.18 C
ATOM 2229 CG PRO A 323 7.725 -34.803 110.523 1.00 23.36 C
ATOM 2230 CD PRO A 323 7.282 -34.306 111.868 1.00 23.35 C
ATOM 2231 N PRO A 324 6.389 -30.483 109.214 1.00 21.26 N
ATOM 2232 CA PRO A 324 5.345 -29.504 108.905 1.00 20.28 C
ATOM 2233 C PRO A 324 4.265 -30.149 108.052 1.00 20.15 C
ATOM 2234 O PRO A 324 4.394 -31.297 107.620 1.00 21.43 O
ATOM 2235 CB PRO A 324 6.093 -28.424 108.128 1.00 20.19 C
ATOM 2236 CG PRO A 324 7.499 -28.565 108.593 1.00 22.05 C
ATOM 2237 CD PRO A 324 7.676 -30.054 108.644 1.00 21.65 C
ATOM 2238 N VAL A 325 3.207 -29.394 107.797 1.00 18.93 N
ATOM 2239 CA VAL A 325 2.106 -29.878 106.986 1.00 17.74 C
ATOM 2240 C VAL A 325 1.533 -28.648 106.265 1.00 18.27 C
ATOM 2241 O VAL A 325 1.462 -27.564 106.845 1.00 19.21 O
ATOM 2242 CB VAL A 325 1.065 -30.589 107.891 1.00 15.23 C
ATOM 2243 CG1 VAL A 325 0.310 -29.581 108.721 1.00 16.64 C
ATOM 2244 CG2 VAL A 325 0.144 -31.438 107.066 1.00 16.39 C
ATOM 2245 N ALA A 326 1.160 -28.802 104.998 1.00 18.66 N
ATOM 2246 CA ALA A 326 0.636 -27.680 104.211 1.00 19.12 C
ATOM 2247 C ALA A 326 -0.783 -27.267 104.588 1.00 19.46 C
ATOM 2248 O ALA A 326 -1.755 -27.780 104.035 1.00 22.10 O
ATOM 2249 CB ALA A 326 0.699 -28.016 102.723 1.00 17.55 C
ATOM 2250 N LEU A 327 -0.898 -26.324 105.516 1.00 17.63 N
ATOM 2251 CA LEU A 327 -2.205 -25.855 105.961 1.00 16.12 C
ATOM 2252 C LEU A 327 -2.471 -24.440 105.467 1.00 16.88 C
ATOM 2253 O LEU A 327 -1.590 -23.795 104.901 1.00 18.47 O
ATOM 2254 CB LEU A 327 -2.279 -25.897 107.487 1.00 13.74 C
ATOM 2255 CG LEU A 327 -1.897 -27.240 108.115 1.00 11.17 C
ATOM 2256 CD1 LEU A 327 -1.930 -27.126 109.624 1.00 9.68 C
ATOM 2257 CD2 LEU A 327 -2.840 -28.330 107.633 1.00 9.69 C
ATOM 2258 N THR A 328 -3.692 -23.962 105.683 1.00 16.51 N
ATOM 2259 CA THR A 328 -4.076 -22.623 105.254 1.00 15.88 C
ATOM 2260 C THR A 328 -3.155 -21.559 105.881 1.00 15.66 C
ATOM 2261 O THR A 328 -3.011 -21.475 107.101 1.00 14.78 O
ATOM 2262 CB THR A 328 -5.577 -22.379 105.580 1.00 14.98 C
ATOM 2263 OG1 THR A 328 -5.835 -20.975 105.690 1.00 15.31 O
ATOM 2264 CG2 THR A 328 -5.968 -23.098 106.862 1.00 16.50 C
ATOM 2265 N ALA A 329 -2.535 -20.758 105.015 1.00 15.68 N
ATOM 2266 CA ALA A 329 -1.570 -19.718 105.391 1.00 15.70 C
ATOM 2267 C ALA A 329 -2.002 -18.573 106.305 1.00 16.13 C
ATOM 2268 O ALA A 329 -3.165 -18.169 106.317 1.00 15.82 O
ATOM 2269 CB ALA A 329 -0.949 -19.136 104.123 1.00 15.02 C
ATOM 2270 N THR A 330 -1.028 -18.050 107.054 1.00 17.52 N
ATOM 2271 CA THR A 330 -1.220 -16.930 107.984 1.00 18.76 C
ATOM 2272 C THR A 330 0.039 -16.068 108.050 1.00 18.94 C
ATOM 2273 O THR A 330 1.134 -16.520 107.713 1.00 18.98 O
ATOM 2274 CB THR A 330 -1.486 -17.400 109.427 1.00 18.55 C
ATOM 2275 OG1 THR A 330 -2.582 -18.314 109.441 1.00 25.08 O
ATOM 2276 CG2 THR A 330 -1.826 -16.215 110.316 1.00 17.33 C
ATOM 2277 | |
import os, gc, sys
import pygrib
import regionmask
import cartopy
import cartopy.crs as ccrs
import numpy as np
import pandas as pd
import xarray as xr
import geopandas as gpd
import multiprocessing as mp
import matplotlib.pyplot as plt
import matplotlib as mpl
from glob import glob
from numpy import trapz
from scipy.integrate import simps
from functools import partial
from matplotlib import gridspec
from datetime import datetime, timedelta
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import colors
from verif_config import *
from verif_funcs import *
import warnings
warnings.filterwarnings('ignore')
os.environ['OMP_NUM_THREADS'] = '1'
os.makedirs(tmp_dir, exist_ok=True)
if __name__ == '__main__':
cwa = sys.argv[1]
extract_dir = nbm_dir + 'extract/'
extract_flist = sorted(glob(extract_dir + '*'))
if not os.path.isfile(urma_dir + 'agg/urma_agg.nc'):
pass
#print('URMA aggregate not found')
else:
#print('Getting URMA aggregate from file')
urma = xr.open_dataset(urma_dir + 'agg/urma_agg.nc')['apcp24h_mm']
urma = urma/25.4
urma = urma.rename('apcp24h_in')
lons, lats = urma.lon, urma.lat
# # # # # # # # # # # # # # # # # # # # # # # # # # #
# Masking with regionmask and forecast zones shapefile
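    # regionmask.mask_3D_geopandas returns a boolean mask with one layer per
    # geometry along a 'region' dimension; below it is renamed to 'cwa' and
    # labeled with the CWA identifiers so a single CWA's mask can be selected.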
geodir = '../forecast-zones/'
zones_shapefile = glob(geodir + '*.shp')[0]
# Read the shapefile
zones = gpd.read_file(zones_shapefile)
# Prune to Western Region using TZ
zones = zones.set_index('TIME_ZONE').loc[['M', 'Mm', 'm', 'MP', 'P']].reset_index()
cwas = zones.dissolve(by='CWA').reset_index()[['CWA', 'geometry']]
_cwas = cwas.copy()
if cwa == 'WESTUS':
_cwas['CWA'] = 'WESTUS'
_cwas = _cwas.dissolve(by='CWA').reset_index()
bounds = _cwas.total_bounds
else:
bounds = _cwas[_cwas['CWA'] == cwa].bounds.values[0]
print('\ndomain: ', bounds, '\n\n')
lons, lats = urma.lon, urma.lat
mask = regionmask.mask_3D_geopandas(_cwas, lons, lats).rename({'region':'cwa'})
mask['cwa'] = _cwas.iloc[mask.cwa]['CWA'].values.astype(str)
mask = mask.sel(cwa=cwa)
idx = np.where(
(urma.lat >= bounds[1]) & (urma.lat <= bounds[3]) &
(urma.lon >= bounds[0]) & (urma.lon <= bounds[2]))
mask = mask.isel(y=slice(idx[0].min(), idx[0].max()), x=slice(idx[1].min(), idx[1].max()))
urma = urma.isel(y=slice(idx[0].min(), idx[0].max()), x=slice(idx[1].min(), idx[1].max()))
urma = urma.transpose('valid', 'y', 'x')
# # # # # # # # # # # # # # # # # # # # # # # # # # #
# Data processing core
fhrs = np.arange(fhr_start, fhr_end+1, fhr_step)
extract_pbin_stats_mp = partial(extract_pbin_stats, _urma=urma, _idx=idx, _mask=mask)
max_pool = 16 if cwa != 'WESTUS' else 4
pool_count = len(fhrs) if len(fhrs) < max_pool else max_pool
with mp.get_context('fork').Pool(pool_count) as p:
returns = p.map(extract_pbin_stats_mp, fhrs)
p.close()
p.join()
returns = np.array(returns, dtype=object).reshape(-1, 6)
data = {fhr:{threshold:[] for threshold in produce_thresholds} for fhr in fhrs}
for item in returns:
threshold, fhr = item[:2]
data[fhr][threshold].append(item[2:])
# # # # # # # # # # # # # # # # # # # # # # # # # # #
# Reliability Diagrams
for thresh in produce_thresholds:
fig = plt.figure(figsize=(9, 11), facecolor='w')
axs = gridspec.GridSpec(2, 1, height_ratios=[4, 1])
ax = plt.subplot(axs[0])
ax1 = plt.subplot(axs[1])
relplot_agg = []
shades = np.linspace(.05, .65, len(fhrs))
for i, fhr in enumerate(fhrs):
relplot = []
for bin_data in data[fhr][thresh]:
bins, n, N = bin_data[:-1]
center = np.mean(bins)/100
n = n.sum()
N = N.sum()
obs_rel_freq = (n/N)
relplot.append([center, obs_rel_freq, n.sum(), N.sum()])
relplot = np.array(relplot)
relplot_agg.append(relplot)
ax.plot(relplot[:, 0], relplot[:, 1], linewidth=1,
label='F%03d'%fhr, color=str(shades[i]),
#marker='+', markersize=15
)
relplot_agg = np.array(relplot_agg)
relplot_agg = np.array(
[relplot_agg.mean(axis=0)[:, 0],
relplot_agg.sum(axis=0)[:, 2]/relplot_agg.sum(axis=0)[:, 3]])
ax.plot(relplot_agg[0], relplot_agg[1], linewidth=2, color='r',
marker='+', markersize=15, label='ALL')
perfect = np.arange(0, 1.1, .1)
_urma, _mask = xr.broadcast(urma, mask)
_urma = xr.where(_mask, _urma, np.nan)
climo = xr.where((_urma > thresh), 1, 0).sum().values/_urma.size
skill = perfect - ((perfect - climo)/2)
ax.plot(perfect, perfect,
color='k')
ax.axhline(climo,
color='k', linestyle='--')
ax.plot(perfect, skill,
color='k', linestyle='--')
fillperf = np.arange(climo, 1, .001)
ax.fill_between(fillperf, fillperf - (fillperf - climo)/2, 1,
color='gray', alpha=0.35)
fillperf = np.arange(0, climo, .001)
ax.fill_between(fillperf, 0, fillperf - (fillperf - climo)/2,
color='gray', alpha=0.35)
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.set_xticks(perfect)
ax.set_yticks(perfect)
ax.set_xlabel('Forecast Probability')
ax.set_ylabel('Observed Relative Frequency')
ax.grid(zorder=1)
date0, date1 = start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d')
ax.set_title((
'NBM Reliability | CWA: %s\n'%cwa +
'%s - %s\n'%(date0, date1) +
'%02dh Acc QPF | %3dh Lead Time\n\n'%(interval, fhr) +
'Probability of Exceeding %.2f"\n\n'%thresh +
'n forecast prob > 0: %2.1e | n observed > %.2f: %2.1e'%(
relplot[:, 2].sum(), thresh, relplot[:, 3].sum())))
ax.legend(loc='upper left')
# # # # # # # # # # # # # # # # # # # # # # # #
ax1.bar(relplot_agg[0], relplot[:, 3], color='k', width=0.095, zorder=10)
ax1.bar(relplot_agg[0], relplot[:, 2], color='r', alpha=0.25, width=0.095, zorder=11)
ax1.set_xticks(np.arange(0, 1.1, .1))
ax1.set_xlim([0, 1])
ax1.set_yscale('log')
ax1.set_yticks([1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9])
ax1.set_xlabel('Forecast Probability')
ax1.set_ylabel('# Forecasts')
ax1.grid(zorder=-1)
# plt.show()
savedir = fig_dir + '%s/%s/reliability/'%(ver, cwa)
os.makedirs(savedir, exist_ok=True)
savestr = 'nbm{}_{}_reliability_{}in.png'.format(
ver, cwa, ('%.2f'%thresh).replace('.', 'p'))
fig.subplots_adjust(top=0.85)
plt.savefig(savedir + savestr, dpi=200)
print('Saved: ', savestr)
plt.close()
# # # # # # # # # # # # # # # # # # # # # # # # # # #
# ROC by Lead Time
bin_list = [i[0] for i in data[fhrs[0]][produce_thresholds[0]]]
fig, ax = plt.subplots(1, facecolor='w', figsize=(12, 6))
for thresh in produce_thresholds:
rocplot_agg_data = {np.mean(bins)/100:{k:[] for k in ['a', 'b', 'c', 'd']} for bins in bin_list}
rocss_plot = []
for i, fhr in enumerate(fhrs):
rocplot = [[1, 1, 1]]
for bin_data in data[fhr][thresh]:
bins = bin_data[0]
center = np.mean(bins)/100
con_table = bin_data[-1]
a, b, c, d = [i.sum().values for i in con_table]
pod = a / (a + c)
far = b / (a + b)
rocplot.append([far, pod, center])
rocplot_agg_data[center]['a'].append(a)
rocplot_agg_data[center]['b'].append(b)
rocplot_agg_data[center]['c'].append(c)
rocplot_agg_data[center]['d'].append(d)
rocplot.append([0, 0, 0])
rocplot = np.array(rocplot)
            # Use Simpson's rule to calculate the area under the ROC curve (AUC)
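            # The ROC skill score (ROCSS) rescales AUC so that a no-skill forecast
            # (AUC = 0.5) maps to 0 and a perfect forecast (AUC = 1.0) maps to 1.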
auc = simps(rocplot[:, 1], dx=bint)/100
roc_ss = 2 * (auc - 0.5)
rocss_plot.append([fhr, roc_ss, auc, thresh])
rocss_plot = np.array(rocss_plot)
ax.plot(rocss_plot[:, 0], rocss_plot[:, 1],
marker='x', markersize=10, linewidth=2,
label='> %.2f"'%np.unique(rocss_plot[:, 3])[0])
ax.axhline(0, color='k')
ax.set_xticks(rocss_plot[:, 0])
ax.set_xlabel('\nForecast Hour/Lead Time')
axx = ax.twinx()
ax.set_yticks(ax.get_yticks())
axx.set_yticks(ax.get_yticks())
axx.set_yticklabels(['%.1f'%v for v in ax.get_yticks()/2 + 0.5])
ax.set_ylabel('ROC Skill Score\n')
axx.set_ylabel('\nArea Under Curve (AUC)')
date0, date1 = start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d')
ax.set_title((
'NBM Relative Operating Characteristic | CWA: %s\n'%cwa +
'%s - %s\n'%(date0, date1) +
'%02dh Acc QPF\n\n'%(interval) +
'Probability of Exceeding Threshold\n'))
ax.grid()
ax.legend(loc='lower left')
# plt.show()
savedir = fig_dir + '%s/%s/roc/'%(ver, cwa)
os.makedirs(savedir, exist_ok=True)
savestr = 'nbm{}_{}_roc_leadtime.png'.format(ver, cwa)
fig.subplots_adjust(top=0.80)
plt.savefig(savedir + savestr, dpi=200)
print('Saved: ', savestr)
plt.close()
# # # # # # # # # # # # # # # # # # # # # # # # # # #
# ROC Curves by Threshold
bin_list = [i[0] for i in data[fhrs[0]][produce_thresholds[0]]]
for thresh in produce_thresholds:
rocplot_agg_data = {np.mean(bins)/100:{k:[] for k in ['a', 'b', 'c', 'd']} for bins in bin_list}
fig, ax = plt.subplots(figsize=(10, 10), facecolor='w')
shades = np.linspace(.05, .65, len(fhrs))
for i, fhr in enumerate(fhrs):
rocplot = [[1, 1, 1]]
for bin_data in data[fhr][thresh]:
bins = bin_data[0]
center = np.mean(bins)/100
con_table = bin_data[-1]
a, b, c, d = [i.sum().values for i in con_table]
pod = a / (a + c)
far = b / (a + b)
rocplot.append([far, pod, center])
rocplot_agg_data[center]['a'].append(a)
rocplot_agg_data[center]['b'].append(b)
rocplot_agg_data[center]['c'].append(c)
rocplot_agg_data[center]['d'].append(d)
rocplot.append([0, 0, 0])
rocplot = np.array(rocplot)
ax.plot(rocplot[:, 0], rocplot[:, 1], linewidth=1, label='F%03d'%fhr,
color=str(shades[i]))
rocplot_agg = [[1, 1, 1]]
for center in rocplot_agg_data.keys():
a, b, c, d = [np.sum(rocplot_agg_data[center][i]) for i in ['a', 'b', 'c', 'd']]
pod = a / (a + c)
far = b / (a + b)
rocplot_agg.append([far, pod, center])
rocplot_agg.append([0, 0, 0])
rocplot_agg = np.array(rocplot_agg)
        # Use Simpson's rule to calculate the area under the ROC curve (AUC)
auc = simps(rocplot_agg[:, 1], dx=bint)/100
roc_ss = 2 * (auc - 0.5)
ax.plot(rocplot_agg[:, 0], rocplot_agg[:, 1], marker='o', markersize=7.5, color='r', linewidth=2)
[ax.text(x*1.04, y*.995, s, fontsize=10) for x, y, s in rocplot_agg[1:-1]]
ax.plot(np.arange(0, 1.1), np.arange(0, 1.1), 'k--')
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
date0, date1 = start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d')
ax.set_title((
'NBM Relative Operating Characteristic | CWA: %s\n'%cwa +
'%s - %s\n'%(date0, date1) +
'%02dh Acc QPF | %3dh Lead Time\n\n'%(interval, fhr) +
'Probability of Exceeding %.2f"\n'%thresh))
ax.set_xlabel('False Alarm Rate (POFD)')
ax.set_ylabel('Probability of Detection (POD)')
ax.text(.45, .42, 'No Skill', rotation=45, fontsize=14)
ax.text(.812, .055, 'ROCSS: %.2f'%roc_ss, rotation=0, fontsize=14, weight='bold')
ax.text(.85, .025, 'AUC: %.2f'%auc, rotation=0, fontsize=14, weight='bold')
| |
East Asian ideograph
0x21533F: (0x80CE, 0), # East Asian ideograph
0x215340: (0x80DE, 0), # East Asian ideograph
0x215341: (0x80E4, 0), # East Asian ideograph
0x215342: (0x80F0, 0), # East Asian ideograph
0x215343: (0x8102, 0), # East Asian ideograph
0x215344: (0x8105, 0), # East Asian ideograph
0x215345: (0x80F1, 0), # East Asian ideograph
0x215346: (0x80F4, 0), # East Asian ideograph
0x215347: (0x80ED, 0), # East Asian ideograph
0x235348: (0x9A10, 0), # East Asian ideograph
0x215349: (0x8106, 0), # East Asian ideograph
0x21534A: (0x80F3, 0), # East Asian ideograph
0x21534B: (0x80F8, 0), # East Asian ideograph
0x23534C: (0x9A24, 0), # East Asian ideograph
0x21534D: (0x8108, 0), # East Asian ideograph
0x21534E: (0x812B, 0), # East Asian ideograph
0x21534F: (0x812F, 0), # East Asian ideograph
0x215350: (0x8116, 0), # East Asian ideograph
0x225351: (0x71A4, 0), # East Asian ideograph
0x215352: (0x8129, 0), # East Asian ideograph
0x215353: (0x8155, 0), # East Asian ideograph
0x215354: (0x8154, 0), # East Asian ideograph
0x215355: (0x814B, 0), # East Asian ideograph
0x215356: (0x8151, 0), # East Asian ideograph
0x215357: (0x8150, 0), # East Asian ideograph
0x215358: (0x814E, 0), # East Asian ideograph
0x275359: (0x80C0, 0), # East Asian ideograph
0x21535A: (0x8146, 0), # East Asian ideograph
0x21535B: (0x813E, 0), # East Asian ideograph
0x21535C: (0x8171, 0), # East Asian ideograph
0x21535D: (0x8170, 0), # East Asian ideograph
0x21535E: (0x8178, 0), # East Asian ideograph
0x21535F: (0x8165, 0), # East Asian ideograph
0x215360: (0x816E, 0), # East Asian ideograph
0x215361: (0x8173, 0), # East Asian ideograph
0x275362: (0x80BF, 0), # East Asian ideograph
0x215363: (0x8179, 0), # East Asian ideograph
0x215364: (0x817A, 0), # East Asian ideograph
0x215365: (0x8166, 0), # East Asian ideograph
0x215366: (0x8180, 0), # East Asian ideograph
0x225367: (0x71D1, 0), # East Asian ideograph
0x215368: (0x817F, 0), # East Asian ideograph
0x215369: (0x818A, 0), # East Asian ideograph
0x21536A: (0x8188, 0), # East Asian ideograph
0x21536B: (0x819D, 0), # East Asian ideograph
0x21536C: (0x81A0, 0), # East Asian ideograph
0x22536D: (0x71CA, 0), # East Asian ideograph
0x21536E: (0x819A, 0), # East Asian ideograph
0x21536F: (0x819C, 0), # East Asian ideograph
0x215370: (0x81B3, 0), # East Asian ideograph
0x275371: (0x817B, 0), # East Asian ideograph
0x215372: (0x81A8, 0), # East Asian ideograph
0x215373: (0x81C6, 0), # East Asian ideograph
0x215374: (0x81BA, 0), # East Asian ideograph
0x215375: (0x81C3, 0), # East Asian ideograph
0x215376: (0x81C0, 0), # East Asian ideograph
0x215377: (0x81C2, 0), # East Asian ideograph
0x275378: (0x8113, 0), # East Asian ideograph
0x275379: (0x80C6, 0), # East Asian ideograph
0x27537A: (0x8138, 0), # East Asian ideograph
0x27537B: (0x810D, 0), # East Asian ideograph
0x21537C: (0x81CD, 0), # East Asian ideograph
0x27537D: (0x8191, 0), # East Asian ideograph
0x27537E: (0x814A, 0), # East Asian ideograph
0x234156: (0x91BF, 0), # East Asian ideograph
0x276B79: (0x523F, 0), # East Asian ideograph
0x6F584B: (0xCA0C, 0), # Korean hangul
0x2D3B3F: (0x5C02, 0), # East Asian ideograph
0x21313B: (0x4F43, 0), # East Asian ideograph
0x2D615A: (0x8EC6, 0), # East Asian ideograph
0x6F2526: (0x3161, 0), # Korean hangul
0x276B7A: (0x523D, 0), # East Asian ideograph
0x6F584C: (0xCA0D, 0), # Korean hangul
0x69543A: (0x57AA, 0), # East Asian ideograph
0x232349: (0x8497, 0), # East Asian ideograph
0x6F5279: (0xC0E5, 0), # Korean hangul
0x274C33: (0x6BD5, 0), # East Asian ideograph
0x213168: (0x4FC4, 0), # East Asian ideograph
0x22234B: (0x5C8D, 0), # East Asian ideograph
0x213F51: (0x61E3, 0), # East Asian ideograph
0x2D3279: (0x514E, 0), # East Asian ideograph
0x6F4E36: (0xB5B1, 0), # Korean hangul
0x6F2471: (0x3149, 0), # Korean hangul
0x6F584D: (0xCA18, 0), # Korean hangul
0x224674: (0x6C3F, 0), # East Asian ideograph
0x6F4929: (0xAC80, 0), # Korean hangul
0x6F5164: (0xBDD4, 0), # Korean hangul
0x217E21: (0x5B5B, 0), # East Asian ideograph
0x213169: (0x4FC2, 0), # East Asian ideograph
0x217158: (0x55CC, 0), # East Asian ideograph
0x233967: (0x8E27, 0), # East Asian ideograph
0x4B594A: (0x8AAD, 0), # East Asian ideograph
0x6F5158: (0xBD80, 0), # Korean hangul
0x6F584E: (0xCA4C, 0), # Korean hangul
0x6F7721: (0xAD35, 0), # Korean hangul
0x213E33: (0x6025, 0), # East Asian ideograph
0x295A28: (0x9E28, 0), # East Asian ideograph
0x2D4B72: (0x7506, 0), # East Asian ideograph
0x6F584F: (0xCA4D, 0), # Korean hangul
0x232358: (0x84B9, 0), # East Asian ideograph
0x347431: (0x58DC, 0), # East Asian ideograph
0x21715A: (0x55DB, 0), # East Asian ideograph
0x233969: (0x8E18, 0), # East Asian ideograph
0x215421: (0x81DA, 0), # East Asian ideograph
0x235422: (0x9A4D, 0), # East Asian ideograph
0x215423: (0x81E3, 0), # East Asian ideograph
0x235424: (0x9A52, 0), # East Asian ideograph
0x275425: (0x4E34, 0), # East Asian ideograph
0x215426: (0x81EA, 0), # East Asian ideograph
0x215427: (0x81EC, 0), # East Asian ideograph
0x215428: (0x81ED, 0), # East Asian ideograph
0x215429: (0x81F3, 0), # East Asian ideograph
0x22542A: (0x71DE, 0), # East Asian ideograph
0x21542B: (0x81FA, 0), # East Asian ideograph
0x21542C: (0x81FB, 0), # East Asian ideograph
0x21542D: (0x81FC, 0), # East Asian ideograph
0x21542E: (0x81FE, 0), # East Asian ideograph
0x21542F: (0x8200, 0), # East Asian ideograph
0x215430: (0x8202, 0), # East Asian ideograph
0x215431: (0x8205, 0), # East Asian ideograph
0x215432: (0x8207, 0), # East Asian ideograph
0x275433: (0x5174, 0), # East Asian ideograph
0x275434: (0x4E3E, 0), # East Asian ideograph
0x215435: (0x820A, 0), # East Asian ideograph
0x215436: (0x820C, 0), # East Asian ideograph
0x215437: (0x820D, 0), # East Asian ideograph
0x215438: (0x8210, 0), # East Asian ideograph
0x215439: (0x8212, 0), # East Asian ideograph
0x23543A: (0x9A6B, 0), # East Asian ideograph
0x21543B: (0x821B, 0), # East Asian ideograph
0x21543C: (0x821C, 0), # East Asian ideograph
0x21543D: (0x821E, 0), # East Asian ideograph
0x21543E: (0x821F, 0), # East Asian ideograph
0x21543F: (0x8222, 0), # East Asian ideograph
0x215440: (0x822A, 0), # East Asian ideograph
0x235441: (0x9AAB, 0), # East Asian ideograph
0x215442: (0x822C, 0), # East Asian ideograph
0x215443: (0x8228, 0), # East Asian ideograph
0x215444: (0x8237, 0), # East Asian ideograph
0x215445: (0x8235, 0), # East Asian ideograph
0x215446: (0x8239, 0), # East Asian ideograph
0x215447: (0x8236, 0), # East Asian ideograph
0x215448: (0x8247, 0), # East Asian ideograph
0x215449: (0x8258, 0), # East Asian ideograph
0x21544A: (0x8259, 0), # East Asian ideograph
0x21544B: (0x8266, 0), # East Asian ideograph
0x21544C: (0x826E, 0), # East Asian ideograph
0x21544D: (0x826F, 0), # East Asian ideograph
0x21544E: (0x8271, 0), # East Asian ideograph
0x21544F: (0x8272, 0), # East Asian ideograph
0x215450: (0x827E, 0), # East Asian ideograph
0x215451: (0x8292, 0), # East Asian ideograph
0x215452: (0x828B, 0), # East Asian ideograph
0x215453: (0x828D, 0), # East Asian ideograph
0x215454: (0x82B3, 0), # East Asian ideograph
0x215455: (0x829D, 0), # East Asian ideograph
0x215456: (0x8299, 0), # East Asian ideograph
0x215457: (0x82BD, 0), # East Asian ideograph
0x215458: (0x82AD, 0), # East Asian ideograph
0x215459: (0x82AC, 0), # East Asian ideograph
0x21545A: (0x82A5, 0), # East Asian ideograph
0x21545B: (0x829F, 0), # East Asian ideograph
0x27545C: (0x520D, 0), # East Asian ideograph
0x21545D: (0x82B1, 0), # East Asian ideograph
0x21545E: (0x82B9, 0), # East Asian ideograph
0x69545F: (0x58E5, 0), # East Asian ideograph
0x215460: (0x82E7, 0), # East Asian ideograph
0x215461: (0x8305, 0), # East Asian ideograph
0x215462: (0x8309, 0), # East Asian ideograph
0x215463: (0x82E3, 0), # East Asian ideograph
0x215464: (0x82DB, 0), # East Asian ideograph
0x215465: (0x82E6, 0), # East Asian ideograph
0x215466: (0x8304, 0), # East Asian ideograph
0x215467: (0x82E5, 0), # East Asian ideograph
0x215468: (0x8302, 0), # East Asian ideograph
0x215469: (0x82DC, 0), # East Asian ideograph
0x21546A: (0x82D7, 0), # East Asian ideograph
0x21546B: (0x82F1, 0), # East Asian ideograph
0x21546C: (0x8301, 0), # East Asian ideograph
0x23546D: (0x9AD6, 0), # East Asian ideograph
0x21546E: (0x82D4, 0), # East Asian ideograph
0x21546F: (0x82D1, 0), # East Asian ideograph
0x215470: (0x82DE, 0), # East Asian ideograph
0x215471: (0x82DF, 0), # East Asian ideograph
0x215472: (0x832B, | |
"relaxant": 1.0,
"agitated": -2.0,
"bastardised": -2.3,
"moronic": -2.7,
"disadvantages": -1.7,
"applaud": 2.0,
"doubtlessness": 0.8,
"mockers": -1.3,
"disadvantaged": -1.7,
"lamentation": -1.4,
"delicate": 0.2,
"weep": -2.7,
"confusedness": -1.5,
"longing": -0.1,
"cheerly": 2.4,
"stealings": -1.9,
"delectable": 2.9,
"killingly": -2.6,
"relief": 2.1,
"delectably": 2.8,
"inability": -1.7,
"offensively": -2.8,
"stammering": -1.0,
"adventurists": 1.2,
"risking": -1.3,
"dumbness": -1.9,
"violent": -2.9,
"kill": -3.7,
"dauntless": 2.3,
"\\=": -1.1,
"\\:": -1.0,
"hostilely": -2.2,
"dumbfounding": -0.8,
"dully": -1.1,
"poised": 1.0,
"dulls": -1.0,
"kills": -2.5,
"dumpier": -1.4,
"succeeders": 1.3,
"accepted": 1.1,
"romanticizing": 1.2,
"championing": 1.8,
"beneficed": 1.1,
"harmonising": 1.4,
"repressurizing": -0.1,
"benefices": 1.1,
"freestyles": 0.3,
"freestyler": 0.4,
"troublesomeness": -1.9,
"burdens": -1.5,
"joking": 0.9,
"respect": 2.1,
"intact": 0.8,
"intellections": 0.8,
"frees": 1.2,
"douchebag": -3.0,
"legal": 0.5,
"o:": -0.2,
"freed": 1.7,
"calmed": 1.6,
"welcome": 2.0,
"assuredness": 1.4,
"lmfao": 2.5,
"stereotyped": -1.2,
"weirdest": -0.9,
"calmer": 1.5,
"terrified": -3.0,
"trivialize": -1.1,
"warfares": -1.8,
"resolvers": 1.4,
"respectiveness": 1.1,
"ok": 1.2,
"zealous": 0.5,
"determinably": 0.9,
"victimhood": -2.0,
"repressible": -1.5,
"determinable": 0.9,
"stank": -1.9,
"annoyances": -1.8,
"wisecracked": -0.5,
"comforters": 1.2,
"vitalities": 1.2,
"trustable": 2.3,
"feudal": -0.8,
"empathetic": 1.7,
"trustor": 0.4,
"racism": -3.1,
"lowdowns": -0.2,
"racist": -3.0,
"crudenesses": -2.0,
"ranters": -1.2,
"evillest": -3.3,
"mischief": -1.5,
"merrymaking": 2.2,
"shamelessly": -1.4,
"trivium": -0.3,
"killdeers": -0.1,
"promote": 2,
"sentimentalists": 0.7,
"dullish": -1.1,
"excruciate": -2.7,
"merriness": 2.2,
"lonesomeness": -1.8,
"ineffectually": -1.1,
"ennui": -1.2,
"nerdier": -0.2,
"strongly": 1.1,
"frighted": -1.4,
"perfectas": 0.6,
"frighten": -1.4,
"dignitaries": 0.6,
"idiot": -2.3,
"smuggest": -1.5,
"benevolently": 1.4,
"splendours": 2.2,
"dreadnought": -0.6,
"dismayed": -1.9,
"promiscuousness": -0.9,
"contradicted": -1.3,
"serious": -0.3,
"feudalize": -0.5,
"applause": 1.8,
"uglier": -2.2,
"uglies": -2.0,
"disagreeing": -1.4,
"exhausting": -1.5,
"lamentations": -1.9,
"numbing": -1.1,
"amazedly": 2.1,
"talentless": -1.6,
"ptl": 2.6,
"lack": -1.3,
"antagonize": -2.0,
"depressurize": -0.5,
"catastrophe": -3.4,
"peacefullest": 3.1,
"safeties": 1.5,
"opportunities": 1.6,
"revengeful": -2.4,
"shameless": -1.4,
"positivists": 1.7,
"faultfinders": -1.5,
"worse": -2.1,
"infringement": -2.1,
"ticked": -1.8,
"fav": 2.0,
"induce": 3.1,
"weaponed": -1.4,
"fan": 1.3,
"awful": -2.0,
"fag": -2.1,
"fad": 0.9,
"sentimental": 1.3,
"defensiveness": -0.4,
"arguers": -1.4,
"liars": -2.4,
"bastardising": -2.6,
"villainess": -2.9,
"devotement": 1.5,
":\u00de": 1.1,
"warmonger": -2.9,
"nbd": 1.3,
"hesitation": -1.1,
"laughably": 1.2,
"advantageous": 1.5,
"overload": -1.5,
"crush": -0.6,
"frightens": -1.7,
"sux": -1.5,
"devoted": 1.7,
"disillusions": -1.6,
"soothed": 0.5,
"unprofessional": -2.3,
"overstatement": -1.1,
"yvw": 1.6,
"hopelessness": -3.1,
"relaxation": 2.4,
"untarnished": 1.6,
"tragedy": -3.4,
"tenderheartednesses": 2.8,
"graciousness": 2.4,
"mml": 2.0,
"dearths": -0.9,
"convincers": 0.3,
"idealise": 1.4,
"misericordes": -0.5,
"gleeful": 2.9,
"determinateness": 1.1,
"repressor": -1.4,
"tragedians": -1.0,
"problematically": -2.0,
"defeatures": -1.5,
"thankfully": 1.8,
"grandeurs": 2.1,
"fearfully": -2.2,
"improvers": 1.3,
"snub": -1.8,
"warship": -0.7,
"hurray": 2.7,
"irritate": -1.8,
"woefulness": -2.1,
"toughs": -0.8,
"shits": -2.1,
"hurrah": 2.6,
"angrier": -2.3,
"inquisition": -1.2,
"perfectible": 1.5,
"weakening": -1.3,
"foolfishes": -0.4,
"stutter": -1.0,
"unstable": -1.5,
"domination": -0.2,
"glad": 2.0,
"\\^:": -1.3,
"graveyard": -1.2,
"irritations": -1.5,
"ineffectual": -1.2,
"perverts": -2.8,
"freethinker": 1.0,
"embarrassing": -1.6,
"challenging": 0.6,
"shamefast": -1.0,
"abandonment": -2.4,
"whoredoms": -2.4,
"protects": 1.3,
"strongyl": 0.6,
"libertarian": 0.9,
"/-:": -1.3,
"toughie": -0.7,
"dreads": -1.4,
"desperation": -2.0,
"handsomer": 2.0,
"sentimentalizations": 0.4,
"okay": 0.9,
"depressives": -1.5,
"weaknesses": -1.5,
"adventure": 1.3,
"numb": -1.4,
"distrustfulness": -1.6,
"profited": 1.3,
"irritableness": -1.7,
"profiter": 0.7,
"cruelties": -2.3,
"romancers": 1.7,
"honourers": 1.6,
"outrages": -2.3,
"worrits": -1.2,
"charmeuses": 0.4,
"outraged": -2.5,
"trouble": -1.7,
"disagreed": -1.3,
"jollify": 2.1,
"adverse": -1.5,
"pray": 1.3,
"assaulting": -2.3,
"intellectuality": 1.7,
"harmless": 1.0,
"strange": -0.8,
"disagrees": -1.3,
"sentimentalizing": 0.8,
"naggers": -1.5,
"generously": 1.8,
"horribleness": -2.4,
"fuckface": -3.2,
"brutally": -3.0,
"belittle": -1.9,
"adventurous": 1.4,
"successional": 0.9,
"magnifically": 2.4,
"hho1/2k": 1.4,
"friendship": 1.9,
"pleasanter": 1.5,
"cutenesses": 1.9,
"riskily": -0.7,
"beneficent": 2.3,
"oppressed": -2.1,
"b^d": 2.6,
"humiliations": -2.4,
"5fs": 1.5,
"favoritisms": 0.7,
"seditious": -1.7,
"shamefaced": -2.3,
"foolproof": 1.6,
"disquiet": -1.3,
"paradise": 3.2,
"lolz": 2.7,
"induced": 3.1,
"boldness": 1.5,
"louses": -1.3,
"tenderest": 1.4,
"loused": -1.0,
"*<|:-)": 1.6,
"fantastical": 2.0,
"virtuosic": 2.2,
"stampede": -1.8,
"success": 2.7,
"strengtheners": 1.4,
"prejudice": -2.3,
"threat": -2.4,
"freebees": 1.3,
"fallen": -1.5,
"weepie": -0.4,
"unhealthy": -2.4,
"intelligibleness": 1.5,
"sympathy": 1.5,
"defects": -1.7,
"totalitarianism": -2.7,
"molests": -3.1,
"moan": -0.6,
":-||": -2.5,
"grime": -1.5,
"doomsday": -2.8,
"inhibitions": -0.8,
"grimy": -1.8,
"treasurer": 0.5,
"repetitive": -1.0,
"imperfect": -1.3,
"relieved": 1.6,
"convincer": 0.6,
"convinces": 0.7,
"luckily": 2.3,
"treasures": 1.8,
"horrifically": -2.9,
"brightnesses": 1.4,
"convinced": 1.7,
"reliever": 1.5,
"relieves": 1.5,
"insulter": -2.0,
"kind": 2.4,
"nagging": -1.7,
"rigidities": -0.7,
"wonderfulness": 2.9,
"rigidified": -0.7,
"excitability": 1.2,
"devotements": 1.1,
"outrageous": -2.0,
"stall": -0.8,
"risks": -1.1,
"insulted": -2.3,
"rigidifies": -0.6,
"restful": 1.5,
"risky": -0.8,
"cleaner": 0.7,
"motivation": 1.4,
"outstanding": 3.0,
"strengthen": 1.3,
"well": 1.1,
"suspicious": -1.5,
"warmly": 1.7,
"tricksier": -0.5,
"hopelessly": -2.2,
"toughen": 0.1,
"securitization": 0.2,
"perfectionistic": 0.7,
"/=": -0.9,
"lackadaisical": -1.6,
"defensibly": 0.1,
"warnings": -1.2,
"inconvenience": -1.5,
"reach": 0.1,
"foetid": -2.3,
"miserly": -1.4,
"amazonstones": 0.2,
"defensible": 0.8,
"boycotting": -1.7,
"reinvigoration": 2.2,
"destruct": -2.4,
"dynamometer": 0.3,
"hindrance": -1.7,
"lying": -2.4,
"hi5": 1.9,
"fond": 1.9,
"shares": 1.2,
"laments": -1.5,
"painfulness": -2.7,
"greediest": -2.8,
"aggravate": -2.5,
"bff": 2.9,
"bfe": -2.6,
"bfd": -2.7,
"beautifulness": 2.6,
"cruelness": -2.9,
"grouchier": -2.0,
"penalty": -2.0,
"shamefulness": -2.4,
"betray": -3.2,
"devilling": -1.8,
"gains": 1.4,
"strongholds": 1.0,
"grouchiest": -2.3,
"revered": 2.3,
"hid": -0.4,
"savagery": -2.5,
"freedman": 1.1,
"grievances": -1.5,
"keenest": 1.9,
"lamebrained": -2.5,
"adorers": 2.1,
"relentless": 0.2,
"repulsed": -2.2,
"excellences": 2.5,
"dump": -1.6,
"intelligence": 2.1,
"wimpish": -1.6,
"defense": 0.5,
"dumb": -2.3,
"cleverly": 2.3,
"romances": 1.3,
"romancer": 1.3,
"threats": -1.8,
"gravestones": -0.5,
"naggiest": -2.4,
"degrading": -2.8,
"stressor": -1.8,
"mistakers": -1.6,
"shylocking": -1.5,
"bitterns": -0.4,
"harmonicist": 0.5,
"startlers": -0.5,
"lamenting": -2.0,
"cancelling": -0.8,
"creating": 1.2,
"creatine": 0.2,
"favorableness": 2.2,
"smileys": 1.5,
"fatality": -3.5,
"competent": 1.3,
"dangering": -2.5,
"hesitant": -1.0,
"divinise": 0.5,
"]:": -1.6,
"cynic": -1.4,
"distress": -2.4,
"sweet": 2.0,
"regretfully": -1.9,
"suckers": -2.3,
"distrusting": -2.1,
"safecracking": -0.9,
"pu": -1.1,
"dud": -1.0,
"apprehensively": -0.3,
"po": -2.6,
"flirtatiousness": 0.6,
"affectionate": 1.9,
"evildoer": -3.1,
"unhappier": -2.4,
"misbehaving": -1.7,
"demand": -0.5,
"stolen": -2.2,
"snubbed": -2.0,
"egotists": -1.7,
"vitally": 1.1,
"deviltries": -1.5,
"disappointing": -2.2,
"ecstasy": 2.9,
"obstacle": -1.5,
"frantic": -1.9,
"rig": -0.5,
"anguish": -2.9,
"discomfortable": -1.6,
"inspirationally": 2.3,
"stimulates": 1.0,
"excruciation": -3.4,
"sweet<3": 3.0,
"stimulated": 0.9,
"successiveness": 1.0,
"esteemed": 1.9,
"itchy": -1.1,
"absentees": -0.8,
"painless": 1.2,
"holidays": 1.6,
"peacemaking": 1.7,
"robust": 1.4,
"lamenters": -0.5,
"weird": -0.7,
"starves": -2.3,
"lower": -1.2,
"beautifulest": 2.6,
"hagd": 2.2,
";^)": 1.4,
"hagn": 2.2,
"hago": 1.2,
"devastative": -3.2,
"cheer": 2.3,
"lowed": -0.8,
"inspirational": 2.3,
"dulling": -1.1,
"teashops": 0.2,
"chuckleheads": -1.1,
"gorgeously": 2.3,
"abilities": 1.0,
"reliant": 0.5,
"warmheartedness": 2.7,
"competitive": 0.7,
"exonerating": 1.0,
"vigilant": 0.7,
"contemptuousness": -1.1,
"profitable": 1.9,
"likeable": 2.0,
"problematic": -1.9,
"violations": -2.4,
"profitably": 1.6,
"complaint": -1.2,
"complains": -1.6,
"vindicate": 0.3,
"joyless": -2.5,
"condemnation": -2.8,
"boost": 1.7,
"trickinesses": -0.4,
"lowbrow": -1.9,
"moody": -1.5,
"skeptical": -1.3,
"forbidders": -1.5,
"confront": -0.7,
"ignore": -1.5,
"distrust": -1.8,
"treasurership": 0.4,
"divining": 0.9,
"acceptation": 1.3,
"certainties": 0.9,
"disinclined": -1.1,
"selfishness": -1.7,
"sprightly": 2.0,
"emptiers": -0.7,
"glamorise": 1.3,
"stingy": -1.6,
"innovation": 1.6,
"apathetic": -1.2,
"jumpy": -1.0,
"zzz": -1.2,
"refused": -1.2,
"affectionally": 1.5,
"sunniest": 2.4,
"intense": 0.3,
"unconfirmed": -0.5,
"cautious": -0.4,
"niceness": 1.6,
"firing": -1.4,
"frightful": -2.3,
"stricken": -2.3,
"greets": 0.6,
"hatred": -3.2,
"complimentary": 1.9,
"restrict": -1.6,
"murderees": -3.1,
"beneficence": 2.8,
"glooming": -1.8,
"piteous": -1.2,
"kissers": 1.5,
"stench": -2.3,
"impressed": 2.1,
"prettied": 1.6,
"xoxo": 3.0,
"pretties": 1.7,
"prettier": 2.1,
"impresses": 2.1,
"fearfulness": -1.8,
"audacious": 0.9,
"gracefulness": 2.2,
"freeloads": -1.3,
"reassurance": 1.5,
"shakiest": -1.2,
"forbidding": -1.9,
"lowlinesses": -1.2,
"astounds": 2.1,
"gratifying": 2.3,
"perfectness": 3.0,
"confusingly": -1.4,
"highlight": 1.4,
"divinatory": 1.6,
"tendernesses": 0.9,
"freak": -1.9,
"worthwhile": 1.4,
"dismay": -1.8,
"warning": -1.4,
"freakiness": -1.4,
"disappoints": -1.6,
"rainy": -0.3,
"evils": -2.7,
"scarer": -1.7,
"peace": 2.5,
"backs": -0.2,
"masochists": -1.2,
"mock": -1.8,
"nice": 1.8,
"shylocks": -1.4,
"activenesses": 0.8,
"energizers": 1.7,
"problems": -1.7,
"helping": 1.2,
"lucking": 1.2,
"flirtation": 1.7,
"suffers": -2.1,
"happier": 2.4,
"gr8": 2.7,
"attacking": -2.0,
"impatient": -1.2,
"ungratefully": -1.8,
"teases": -1.2,
"impatiens": -0.2,
"pervert": -2.3,
"dreadnoughts": -0.4,
"lowland": -0.1,
"lmbao": 1.8,
"infatuation": 0.6,
"dumbbell": -0.8,
"honoraria": 0.6,
"rigidity": -0.7,
"humoresque": 1.2,
"humored": 1.2,
"obstinate": -1.2,
"stable": 1.2,
"ridiculousness": -1.1,
"egotistic": -1.4,
"degrades": -2.1,
"slickest": 0.3,
"wishes": 0.6,
"tranquilizer": -0.1,
"cover-up": -1.2,
"stinkbug": -0.2,
"ruinously": -2.6,
"elegancies": 1.6,
"petrifications": -0.4,
"defeats": -1.3,
"gullible": -1.5,
"tenderness": 1.8,
"painlessness": 0.4,
"idealize": 1.2,
"friendliness": 2.0,
"grimalkins": -0.9,
">:o": -1.2,
"surefire": 1.0,
"frighting": -1.5,
"gorgeousnesses": 2.1,
"manipulating": -1.5,
"appeased": 0.9,
"appeases": 0.9,
"resigners": -1.0,
"immoralists": -1.7,
"fearlessness": 1.1,
| |
API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
response = client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.GetTransitionRouteGroupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_get_transition_route_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
client.get_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.GetTransitionRouteGroupRequest()
@pytest.mark.asyncio
async def test_get_transition_route_group_async(
transport: str = "grpc_asyncio",
request_type=transition_route_group.GetTransitionRouteGroupRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
)
response = await client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == transition_route_group.GetTransitionRouteGroupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_get_transition_route_group_async_from_dict():
await test_get_transition_route_group_async(request_type=dict)
def test_get_transition_route_group_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.GetTransitionRouteGroupRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
call.return_value = transition_route_group.TransitionRouteGroup()
client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_transition_route_group_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = transition_route_group.GetTransitionRouteGroupRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.TransitionRouteGroup()
)
await client.get_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_transition_route_group_flattened():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.TransitionRouteGroup()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_transition_route_group(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_transition_route_group_flattened_error():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_transition_route_group(
transition_route_group.GetTransitionRouteGroupRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_transition_route_group_flattened_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = transition_route_group.TransitionRouteGroup()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
transition_route_group.TransitionRouteGroup()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_transition_route_group(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_transition_route_group_flattened_error_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_transition_route_group(
transition_route_group.GetTransitionRouteGroupRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[gcdc_transition_route_group.CreateTransitionRouteGroupRequest, dict,],
)
def test_create_transition_route_group(request_type, transport: str = "grpc"):
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gcdc_transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
response = client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_create_transition_route_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
client.create_transition_route_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
)
@pytest.mark.asyncio
async def test_create_transition_route_group_async(
transport: str = "grpc_asyncio",
request_type=gcdc_transition_route_group.CreateTransitionRouteGroupRequest,
):
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup(
name="name_value", display_name="display_name_value",
)
)
response = await client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert (
args[0] == gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(response, gcdc_transition_route_group.TransitionRouteGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_create_transition_route_group_async_from_dict():
await test_create_transition_route_group_async(request_type=dict)
def test_create_transition_route_group_field_headers():
client = TransitionRouteGroupsClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
call.return_value = gcdc_transition_route_group.TransitionRouteGroup()
client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_transition_route_group_field_headers_async():
client = TransitionRouteGroupsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcdc_transition_route_group.CreateTransitionRouteGroupRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_transition_route_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gcdc_transition_route_group.TransitionRouteGroup()
)
await client.create_transition_route_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def | |
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
"""Content - a MIME-like Content object."""
__all__ = [
'attach_file',
'Content',
'content_from_file',
'content_from_stream',
'json_content',
'text_content',
'TracebackContent',
]
import codecs
import inspect
import json
import os
import sys
from extras import try_import
# To let setup.py work, make this a conditional import.
traceback = try_import('traceback2')
from testtools.compat import (
_b,
_u,
istext,
str_is_unicode,
)
from testtools.content_type import ContentType, JSON, UTF8_TEXT
functools = try_import('functools')
_join_b = _b("").join
DEFAULT_CHUNK_SIZE = 4096
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
def _iter_chunks(stream, chunk_size, seek_offset=None, seek_whence=0):
"""Read 'stream' in chunks of 'chunk_size'.
:param stream: A file-like object to read from.
:param chunk_size: The size of each read from 'stream'.
:param seek_offset: If non-None, seek before iterating.
:param seek_whence: Pass through to the seek call, if seeking.
"""
if seek_offset is not None:
stream.seek(seek_offset, seek_whence)
chunk = stream.read(chunk_size)
while chunk:
yield chunk
chunk = stream.read(chunk_size)
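# Minimal usage sketch for _iter_chunks (assumes an in-memory stream):
#
#   import io
#   list(_iter_chunks(io.BytesIO(b'abcdef'), 2))                  # [b'ab', b'cd', b'ef']
#   list(_iter_chunks(io.BytesIO(b'abcdef'), 4, seek_offset=2))   # [b'cdef']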
class Content(object):
"""A MIME-like Content object.
'Content' objects can be serialised to bytes using the iter_bytes method.
If the 'Content-Type' is recognised by other code, they are welcome to
    look for richer contents than mere byte serialisation - for example in
memory object graphs etc. However, such code MUST be prepared to receive
a generic 'Content' object that has been reconstructed from a byte stream.
:ivar content_type: The content type of this Content.
"""
def __init__(self, content_type, get_bytes):
"""Create a ContentType."""
if None in (content_type, get_bytes):
raise ValueError("None not permitted in %r, %r" % (
content_type, get_bytes))
self.content_type = content_type
self._get_bytes = get_bytes
def __eq__(self, other):
return (self.content_type == other.content_type and
_join_b(self.iter_bytes()) == _join_b(other.iter_bytes()))
def as_text(self):
"""Return all of the content as text.
This is only valid where ``iter_text`` is. It will load all of the
content into memory. Where this is a concern, use ``iter_text``
instead.
"""
return _u('').join(self.iter_text())
def iter_bytes(self):
"""Iterate over bytestrings of the serialised content."""
return self._get_bytes()
def iter_text(self):
"""Iterate over the text of the serialised content.
This is only valid for text MIME types, and will use ISO-8859-1 if
no charset parameter is present in the MIME type. (This is somewhat
arbitrary, but consistent with RFC2617 3.7.1).
:raises ValueError: If the content type is not text/\\*.
"""
if self.content_type.type != "text":
raise ValueError("Not a text type %r" % self.content_type)
return self._iter_text()
def _iter_text(self):
"""Worker for iter_text - does the decoding."""
encoding = self.content_type.parameters.get('charset', 'ISO-8859-1')
decoder = codecs.getincrementaldecoder(encoding)()
for bytes in self.iter_bytes():
yield decoder.decode(bytes)
final = decoder.decode(_b(''), True)
if final:
yield final
def __repr__(self):
return "<Content type=%r, value=%r>" % (
self.content_type, _join_b(self.iter_bytes()))
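# Illustrative usage sketch (not part of the original module): a Content object
# pairs a ContentType with a no-argument callable returning an iterable of
# bytestrings, so the bytes can be produced lazily.
#
#     ct = ContentType('text', 'plain', {'charset': 'utf8'})
#     c = Content(ct, lambda: [_b('hello '), _b('world')])
#     assert _join_b(c.iter_bytes()) == _b('hello world')
#     assert _u('').join(c.iter_text()) == _u('hello world')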
class StackLinesContent(Content):
"""Content object for stack lines.
This adapts a list of "preprocessed" stack lines into a 'Content' object.
The stack lines are most likely produced from ``traceback.extract_stack``
or ``traceback.extract_tb``.
text/x-traceback;language=python is used for the mime type, in order to
provide room for other languages to format their tracebacks differently.
"""
# Whether or not to hide layers of the stack trace that are
# unittest/testtools internal code. Defaults to True since the
# system-under-test is rarely unittest or testtools.
HIDE_INTERNAL_STACK = True
def __init__(self, stack_lines, prefix_content="", postfix_content=""):
"""Create a StackLinesContent for ``stack_lines``.
:param stack_lines: A list of preprocessed stack lines, probably
obtained by calling ``traceback.extract_stack`` or
``traceback.extract_tb``.
:param prefix_content: If specified, a unicode string to prepend to the
text content.
:param postfix_content: If specified, a unicode string to append to the
text content.
"""
content_type = ContentType('text', 'x-traceback',
{"language": "python", "charset": "utf8"})
value = prefix_content + \
self._stack_lines_to_unicode(stack_lines) + \
postfix_content
super(StackLinesContent, self).__init__(
content_type, lambda: [value.encode("utf8")])
def _stack_lines_to_unicode(self, stack_lines):
"""Converts a list of pre-processed stack lines into a unicode string.
"""
msg_lines = traceback.format_list(stack_lines)
return _u('').join(msg_lines)
class TracebackContent(Content):
"""Content object for tracebacks.
This adapts an exc_info tuple to the 'Content' interface.
'text/x-traceback;language=python' is used for the mime type, in order to
provide room for other languages to format their tracebacks differently.
"""
def __init__(self, err, test, capture_locals=False):
"""Create a TracebackContent for ``err``.
:param err: An exc_info error tuple.
:param test: A test object used to obtain failureException.
:param capture_locals: If true, show locals in the traceback.
"""
if err is None:
raise ValueError("err may not be None")
exctype, value, tb = err
# Skip test runner traceback levels
if StackLinesContent.HIDE_INTERNAL_STACK:
while tb and '__unittest' in tb.tb_frame.f_globals:
tb = tb.tb_next
limit = None
# Disabled due to https://bugs.launchpad.net/testtools/+bug/1188420
if (False
and StackLinesContent.HIDE_INTERNAL_STACK
and test.failureException
and isinstance(value, test.failureException)):
# Skip assert*() traceback levels
limit = 0
while tb and not self._is_relevant_tb_level(tb):
limit += 1
tb = tb.tb_next
stack_lines = list(traceback.TracebackException(exctype, value, tb,
limit=limit, capture_locals=capture_locals).format())
content_type = ContentType('text', 'x-traceback',
{"language": "python", "charset": "utf8"})
super(TracebackContent, self).__init__(
content_type, lambda: [x.encode('utf8') for x in stack_lines])
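# Example sketch: capturing the current exception as a detail object. The
# ``test`` argument is any TestCase instance (it is only consulted for
# ``failureException``).
#
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         detail = TracebackContent(sys.exc_info(), self)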
def StacktraceContent(prefix_content="", postfix_content=""):
"""Content object for stack traces.
This function will create and return a 'Content' object that contains a
stack trace.
The mime type is set to 'text/x-traceback;language=python', so other
languages can format their stack traces differently.
:param prefix_content: A unicode string to add before the stack lines.
:param postfix_content: A unicode string to add after the stack lines.
"""
stack = traceback.walk_stack(None)
def filter_stack(stack):
# Discard the filter_stack frame.
next(stack)
# Discard the StacktraceContent frame.
next(stack)
for f, f_lineno in stack:
if StackLinesContent.HIDE_INTERNAL_STACK:
if '__unittest' in f.f_globals:
return
yield f, f_lineno
extract = traceback.StackSummary.extract(filter_stack(stack))
extract.reverse()
return StackLinesContent(extract, prefix_content, postfix_content)
def json_content(json_data):
"""Create a JSON Content object from JSON-encodeable data."""
data = json.dumps(json_data)
if str_is_unicode:
# The json module perversely returns native str not bytes
data = data.encode('utf8')
return Content(JSON, lambda: [data])
def text_content(text):
"""Create a Content object from some text.
This is useful for adding details which are short strings.
"""
if not istext(text):
raise TypeError(
"text_content must be given text, not '%s'." % type(text).__name__
)
return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
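# Example sketch: these helpers are typically used to attach short details to a
# testtools TestCase, e.g.
#
#     self.addDetail('settings', text_content('verbose=1'))
#     self.addDetail('counts', json_content({'passed': 10, 'failed': 0}))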
def maybe_wrap(wrapper, func):
"""Merge metadata for func into wrapper if functools is present."""
if functools is not None:
wrapper = functools.update_wrapper(wrapper, func)
return wrapper
def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
buffer_now=False, seek_offset=None, seek_whence=0):
"""Create a Content object from a file on disk.
Note that unless ``buffer_now`` is explicitly passed in as True, the file
will only be read from when ``iter_bytes`` is called.
:param path: The path to the file to be used as content.
:param content_type: The type of content. If not specified, defaults
to UTF8-encoded text/plain.
:param chunk_size: The size of chunks to read from the file.
Defaults to ``DEFAULT_CHUNK_SIZE``.
:param buffer_now: If True, read the file from disk now and keep it in
memory. Otherwise, only read when the content is serialized.
:param seek_offset: If non-None, seek within the stream before reading it.
:param seek_whence: If supplied, pass to ``stream.seek()`` when seeking.
"""
if content_type is None:
content_type = UTF8_TEXT
def reader():
with open(path, 'rb') as stream:
for chunk in _iter_chunks(stream,
chunk_size,
seek_offset,
seek_whence):
yield chunk
return content_from_reader(reader, content_type, buffer_now)
def content_from_stream(stream, content_type=None,
chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False,
seek_offset=None, seek_whence=0):
"""Create a Content object from a file-like stream.
Note that unless ``buffer_now`` is explicitly passed in as True, the stream
will only be read from when ``iter_bytes`` is called.
:param stream: A file-like object to read the content from. The stream
is not closed by this function or the 'Content' object it returns.
:param content_type: The type of content. If not specified, defaults
to UTF8-encoded text/plain.
:param chunk_size: The size of chunks to read from the file.
Defaults to ``DEFAULT_CHUNK_SIZE``.
:param buffer_now: If True, reads from the stream right now. Otherwise,
only reads when the content is serialized. Defaults to False.
:param seek_offset: If non-None, seek within the stream before reading it.
:param seek_whence: If supplied, pass to ``stream.seek()`` when seeking.
"""
if content_type is None:
content_type = UTF8_TEXT
reader = lambda: _iter_chunks(stream, chunk_size, seek_offset, seek_whence)
return content_from_reader(reader, content_type, buffer_now)
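# Example sketch: wrapping an in-memory stream. With buffer_now=True the stream
# is read immediately, so it may be closed afterwards; with the default
# buffer_now=False it must still be open when iter_bytes() is called.
#
#     import io
#     stream = io.BytesIO(_b('some log output'))
#     content = content_from_stream(stream, UTF8_TEXT, buffer_now=True)
#     assert _join_b(content.iter_bytes()) == _b('some log output')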
def content_from_reader(reader, content_type, buffer_now):
"""Create a Content object that will obtain the content from reader.
:param reader: A callback to read the content. Should return an iterable of
bytestrings.
:param content_type: The content type to create.
:param buffer_now: If True the reader is evaluated immediately and
buffered.
"""
if content_type is None:
content_type = UTF8_TEXT
if buffer_now:
contents = list(reader())
reader = lambda: contents
    return Content(content_type, reader)
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User, Group
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework import status
from rest_framework.authentication import BasicAuthentication, SessionAuthentication
from rest_framework.response import Response
from rest_framework.views import APIView
from logistics.Geo2TagService import updateDriverPos, deleteFleetChannel, deleteDriverPos, clearAllFleetChannels
from logistics.permissions import is_driver, is_owner, IsOwnerPermission, IsDriverPermission, IsOwnerOrDriverPermission
from .forms import SignUpForm, LoginForm, FleetAddForm, FleetInviteDismissForm, DriverPendingFleetAddDeclineForm, AddTripForm, DriverReportProblemForm, \
DriverAcceptTripForm, DriverUpdatePosForm
from .models import Fleet, Driver, Owner, DriverStats, Trip
from .serializers import FleetSerializer, DriverSerializer, TripSerializer
class CsrfExemptSessionAuthentication(SessionAuthentication):
def enforce_csrf(self, request):
        return  # Skip the CSRF check that SessionAuthentication normally enforces
class SignUp(APIView):
def post(self, request):
form = SignUpForm(request.data)
if form.is_valid():
try:
user = User.objects.create_user(username=form.cleaned_data["login"], email=form.cleaned_data["email"], password=form.cleaned_data["password"])
if form.cleaned_data["role"] == "1" :
user.groups.add(Group.objects.get_or_create(name='OWNER')[0])
owner = Owner.objects.create(user=user, first_name=form.cleaned_data["first_name"], last_name=form.cleaned_data["last_name"])
user.save()
owner.save()
else:
user.groups.add(Group.objects.get_or_create(name='DRIVER')[0])
driver = Driver.objects.create(user=user, first_name=form.cleaned_data["first_name"], last_name=form.cleaned_data["last_name"])
driver_stats = DriverStats.objects.create(driver=driver)
user.save()
driver.save()
driver_stats.save()
return Response({"status": "ok"}, status=status.HTTP_201_CREATED)
except Exception as e:
return Response({"status": "error", "errors": [str(e)]}, status=status.HTTP_409_CONFLICT)
else:
return Response({"status": "error", "errors": ["Invalid post parameters"]}, status=status.HTTP_400_BAD_REQUEST)
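# Illustrative client-side sketch (the URL below is an assumption; the actual
# route comes from the project's urls.py, which is not part of this file):
#
#     import requests
#     resp = requests.post('http://localhost:8000/api/signup/', data={
#         'login': 'driver1', 'email': 'driver1@example.com',
#         'password': 'secret', 'role': '2',
#         'first_name': 'Ivan', 'last_name': 'Petrov',
#     })
#     assert resp.status_code == 201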
class Auth(APIView):
def post(self, request):
if not request.user.is_anonymous():
            return Response({"status": "error", "errors": ["Already logged in"]}, status=status.HTTP_409_CONFLICT)
form = LoginForm(request.data)
if form.is_valid():
try:
user = authenticate(username=form.cleaned_data["login"], password=form.cleaned_data["password"])
if user is not None:
if user.is_active:
login(request, user)
return Response({"status": "ok"}, status=status.HTTP_200_OK)
else:
return Response({"status": "error"}, status=status.HTTP_409_CONFLICT)
except Exception as e:
return Response({"status": "error", "errors": [str(e)]}, status=status.HTTP_409_CONFLICT)
else:
return Response({"status": "error", "errors": ["Invalid post parameters"]}, status=status.HTTP_400_BAD_REQUEST)
class Logout(APIView):
def get(self, request):
if request.user.is_anonymous():
return Response({"status": "error", "errors": ["Not authorized"]}, status=status.HTTP_409_CONFLICT)
else:
logout(request)
return Response({"status": "ok"}, status=status.HTTP_200_OK)
class FleetList(APIView):
permission_classes = (IsOwnerOrDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request):
if is_owner(request.user):
fleets = Fleet.objects.filter(owner=request.user.owner)
serialized_fleets = FleetSerializer(fleets, many=True)
return Response(serialized_fleets.data, status=status.HTTP_200_OK)
elif is_driver(request.user):
fleets = request.user.driver.fleets
serialized_fleets = FleetSerializer(fleets, many=True)
return Response(serialized_fleets.data, status=status.HTTP_200_OK)
else:
return Response({"status": "error", "errors": ["Not authorized"]}, status=status.HTTP_400_BAD_REQUEST)
def post(self, request):
current_user = request.user
print(current_user)
form = FleetAddForm(request.data)
owner = request.user.owner
if form.is_valid():
try:
fleet = form.save(commit=False)
fleet.owner = owner
fleet.save()
print(fleet.name, fleet.description, fleet.owner, fleet.id)
return Response({"status": "ok", "fleet_id": fleet.id}, status=status.HTTP_201_CREATED)
except:
return Response({"status": "error"}, status=status.HTTP_409_CONFLICT)
else:
return Response({"status": "error"}, status=status.HTTP_400_BAD_REQUEST)
class DriversByFleet(APIView):
permission_classes = (IsOwnerPermission,)
def get(self, request, fleet_id):
if Fleet.objects.get(pk=fleet_id) in Fleet.objects.filter(owner=request.user.owner):
drivers = Driver.objects.filter(fleets=fleet_id)
serialized_drivers = DriverSerializer(drivers, many=True)
return Response(serialized_drivers.data, status=status.HTTP_200_OK)
else:
return Response({"status": "error", "errors": ["Wrong fleet_id"]}, status=status.HTTP_409_CONFLICT)
class PendingDriversByFleet(APIView):
permission_classes = (IsOwnerPermission,)
def get(self, request, fleet_id):
if Fleet.objects.get(pk=fleet_id) in Fleet.objects.filter(owner=request.user.owner):
drivers = Driver.objects.exclude(fleets=fleet_id).exclude(pending_fleets=fleet_id)
serialized_drivers = DriverSerializer(drivers, many=True)
return Response(serialized_drivers.data, status=status.HTTP_200_OK)
else:
return Response({"status": "error", "errors": ["Wrong fleet_id"]}, status=status.HTTP_409_CONFLICT)
class FleetByIdView(APIView):
permission_classes = (IsOwnerPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request, fleet_id):
fleet = Fleet.objects.get(id=fleet_id)
if fleet in Fleet.objects.filter(owner=request.user.owner):
serialized_fleet = FleetSerializer(fleet, many=False)
return Response(serialized_fleet.data, status=status.HTTP_200_OK)
else:
return Response({"status": "error"}, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, fleet_id):
fleet_for_delete = Fleet.objects.get(id=fleet_id)
if fleet_for_delete in Fleet.objects.filter(owner=request.user.owner):
deleteFleetChannel()
fleet_for_delete.delete()
return Response({"status": "ok"}, status=status.HTTP_200_OK)
else:
return Response({"status": "error"}, status=status.HTTP_400_BAD_REQUEST)
# OWNER API
class FleetInvite(APIView):
permission_classes = (IsOwnerPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def post(self, request, fleet_id):
# POST /api/fleet/(?P<fleet_id>[-\w]+)/invite/
form_offer_invite = FleetInviteDismissForm(request.data)
if form_offer_invite.is_valid():
try:
fleet = Fleet.objects.get(id=fleet_id)
if fleet in Fleet.objects.filter(owner=request.user.owner):
ids = form_offer_invite.cleaned_data.get('driver_id')
failed = False
ids_fleet_failed = []
ids_pending_failed = []
for driver_id in ids.split(sep=','):
if driver_id != '':
driver = Driver.objects.get(id=driver_id)
if fleet in driver.fleets.all():
ids_fleet_failed.append(driver_id)
failed = True
elif fleet in driver.pending_fleets.all():
ids_pending_failed.append(driver_id)
failed = True
else:
driver.pending_fleets.add(fleet)
driver.save()
if failed:
return Response({"status": "error",
"errors": {"Drivers is already in fleet": ids_fleet_failed,
"Drivers is already in pending fleet": ids_pending_failed}},
status=status.HTTP_409_CONFLICT)
return Response({"status": "ok"}, status=status.HTTP_200_OK)
else:
return Response({"status": "error", "errors": ["Not owner of fleet"]},
status=status.HTTP_409_CONFLICT)
except Exception as e:
return Response({"status": "error", "errors": [str(e)]}, status=status.HTTP_409_CONFLICT)
else:
return Response({"status": "error"}, status=status.HTTP_400_BAD_REQUEST)
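# Request sketch for the endpoint above: driver_id is parsed as a
# comma-separated list, so inviting drivers 3 and 5 to fleet 7 would be
# (the URL pattern is taken from the comment in post() and is otherwise an
# assumption):
#
#     requests.post('http://localhost:8000/api/fleet/7/invite/',
#                   data={'driver_id': '3,5'})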
class FleetDismiss(APIView):
permission_classes = (IsOwnerPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def post(self, request, fleet_id):
# POST /api/fleet/(?P<fleet_id>[-\w]+)/dismiss/
form_dismiss = FleetInviteDismissForm(request.data)
if form_dismiss.is_valid():
try:
fleet = Fleet.objects.get(id=fleet_id)
if fleet in Fleet.objects.filter(owner=request.user.owner):
id = form_dismiss.cleaned_data.get('driver_id')
driver = Driver.objects.get(id=id)
deleteDriverPos(fleet, driver)
driver.fleets.remove(fleet)
driver.save()
print(fleet.id, id, driver.id)
return Response({"status": "ok"}, status=status.HTTP_200_OK)
else:
return Response({"status": "error", "errors": ["Not owner of fleet"]}, status=status.HTTP_409_CONFLICT)
except:
return Response({"status": "error"}, status=status.HTTP_409_CONFLICT)
else:
return Response({"status": "error"}, status=status.HTTP_400_BAD_REQUEST)
class TripsByFleetUnaccepted(APIView):
permission_classes = (IsOwnerPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request, fleet_id):
# GET /api/fleet/(?P<fleet_id>[-\w]+)/trips/unaccepted/
fleet = get_object_or_404(Fleet, id=fleet_id, owner=request.user.owner)
trips = Trip.objects.filter(fleet=fleet, driver=None, is_finished=False)
serialized_trips = TripSerializer(trips, many=True)
return Response(serialized_trips.data, status=status.HTTP_200_OK)
class TripsByFleetFinished(APIView):
permission_classes = (IsOwnerPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request, fleet_id):
# GET /api/fleet/(?P<fleet_id>[-\w]+)/trips/finished/
fleet = get_object_or_404(Fleet, id=fleet_id, owner=request.user.owner)
trips = Trip.objects.filter(fleet=fleet, is_finished=True)
serialized_trips = TripSerializer(trips, many=True)
return Response(serialized_trips.data, status=status.HTTP_200_OK)
# DRIVER API
class DriverPendingFleets(APIView):
permission_classes = (IsDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request):
#GET /api/driver/pending_fleets/
pending_fleets = request.user.driver.pending_fleets
serialized_pending_fleets = FleetSerializer(pending_fleets, many=True)
return Response(serialized_pending_fleets.data, status=status.HTTP_200_OK)
class DriverPendingFleetsAccept(APIView):
permission_classes = (IsDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def post(self, request):
# POST /api/driver/pending_fleets/accept/
form_pending_to_fleet = DriverPendingFleetAddDeclineForm(request.data)
if form_pending_to_fleet.is_valid():
try:
fleets = request.user.driver.fleets
pending_fleets = request.user.driver.pending_fleets
ids = form_pending_to_fleet.cleaned_data.get('fleet_id')
for fleet_id in ids.split(sep=','):
try:
waited_fleet = Fleet.objects.get(id=fleet_id)
print(waited_fleet.id)
except Exception as e:
return Response({"status": "error", "errors": [str(e)]}, status=status.HTTP_409_CONFLICT)
if waited_fleet is not None:
if waited_fleet in pending_fleets.all():
pending_fleets.remove(waited_fleet)
fleets.add(waited_fleet)
print("accepted " + str(waited_fleet.id) + " by " + str(request.user.username))
return Response({"status": "ok"}, status=status.HTTP_200_OK)
except Exception as e:
return Response({"status": "error", "errors": [str(e)]}, status=status.HTTP_409_CONFLICT)
else:
return Response({"status": "error"}, status=status.HTTP_400_BAD_REQUEST)
class DriverPendingFleetsDecline(APIView):
permission_classes = (IsDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def post(self, request):
# POST /api/driver/pending_fleets/decline/
form_pending_decline = DriverPendingFleetAddDeclineForm(request.data)
if form_pending_decline.is_valid():
try:
pending_fleets = request.user.driver.pending_fleets
ids = form_pending_decline.cleaned_data.get('fleet_id')
for fleet_id in ids.split(sep=','):
try:
waited_fleet = Fleet.objects.get(id=fleet_id)
print(waited_fleet.id)
except Exception as e:
return Response({"status": "error", "errors": [str(e)]}, status=status.HTTP_409_CONFLICT)
if waited_fleet is not None:
if waited_fleet in pending_fleets.all():
pending_fleets.remove(waited_fleet)
print("declined " + str(waited_fleet.id) + " by " + str(request.user.username))
return Response({"status": "ok"}, status=status.HTTP_200_OK)
except Exception as e:
return Response({"status": "error", "errors": [str(e)]}, status=status.HTTP_409_CONFLICT)
else:
return Response({"status": "error"}, status=status.HTTP_400_BAD_REQUEST)
class DriverFleets(APIView):
permission_classes = (IsDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request):
#GET /api/driver/fleets/
fleets = request.user.driver.fleets
serialized_fleets = FleetSerializer(fleets, many=True)
return Response(serialized_fleets.data, status=status.HTTP_200_OK)
class DriverFleetAvailableTrips(APIView):
permission_classes = (IsDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request, fleet_id):
#GET /api/driver/fleet/<fleet_id>/available_trips/
try:
fleet = Fleet.objects.get(id=fleet_id)
except:
return Response({"status": "error"}, status=status.HTTP_404_NOT_FOUND)
trips = Trip.objects.none()
if fleet in request.user.driver.fleets.all():
trips = Trip.objects.filter(fleet=fleet, driver=None, is_finished=False)
serialized_trips = TripSerializer(trips, many=True)
return Response(serialized_trips.data, status=status.HTTP_200_OK)
class DriverAvailableTrips(APIView):
permission_classes = (IsDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request):
#GET /api/driver/available_trips/
fleets = request.user.driver.fleets
trips = Trip.objects.none()
for fleet in fleets.all():
trips_add = Trip.objects.filter(fleet=fleet, driver=None, is_finished=False)
trips = trips | trips_add
serialized_trips = TripSerializer(trips, many=True)
return Response(serialized_trips.data, status=status.HTTP_200_OK)
class DriverFleetTrips(APIView):
permission_classes = (IsDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request, fleet_id):
#GET /api/driver/fleet/<fleet_id>/trips/
try:
fleet = Fleet.objects.get(id=fleet_id)
print(fleet)
except:
return Response({"status": "error"}, status=status.HTTP_409_CONFLICT)
trips = Trip.objects.none()
if fleet in request.user.driver.fleets.all():
trips = Trip.objects.filter(fleet=fleet, driver=request.user.driver)
serialized_trips = TripSerializer(trips, many=True)
return Response(serialized_trips.data, status=status.HTTP_200_OK)
class DriverTrips(APIView):
permission_classes = (IsDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request):
#GET /api/driver/trips/
fleets = request.user.driver.fleets
trips = Trip.objects.none()
for fleet in fleets.all():
trips_add = Trip.objects.filter(fleet=fleet, driver=request.user.driver)
trips = trips | trips_add
serialized_trips = TripSerializer(trips, many=True)
return Response(serialized_trips.data, status=status.HTTP_200_OK)
class TripById(APIView):
permission_classes = (IsOwnerOrDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def get(self, request, trip_id):
#GET /api/driver/trips/
trip = get_object_or_404(Trip, id=trip_id)
current_user = request.user
if is_driver(current_user) and trip.driver!=current_user.driver:
return Response({"status": "error", "errors": "Not your trip"},status=status.HTTP_409_CONFLICT)
if is_owner(current_user) and (trip.fleet.owner!=current_user.owner):
return Response({"status": "error", "errors": "Not your trip"}, status=status.HTTP_409_CONFLICT)
serialized_trips = TripSerializer(trip)
return Response(serialized_trips.data, status=status.HTTP_200_OK)
class DriverAcceptTrip(APIView):
permission_classes = (IsDriverPermission,)
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def post(self, request):
#POST /api/driver/accept_trip/
driver = request.user.driver
trip_id_form = DriverAcceptTripForm(request.data)
if not trip_id_form.is_valid():
return Response({"status": "trip_id_form not valid"}, status=status.HTTP_400_BAD_REQUEST)
trip_id = trip_id_form.cleaned_data.get('trip_id')
trip = get_object_or_404(Trip, id=trip_id)
try:
print(trip, trip.id, trip.fleet, trip.driver, trip.is_finished)
print(driver, driver.fleets.all())
if trip.driver == driver and trip.is_finished:
                return Response({"status": "error", "errors": "You have already finished this trip"},
status=status.HTTP_409_CONFLICT)
elif trip.driver == driver:
# TODO Redirect to page with current trip
return Response({"status": "error", "errors": "It's your current trip"},
status=status.HTTP_409_CONFLICT)
elif trip.driver is not None:
return Response({"status": "error", "errors": "This trip has already been accepted"},
status=status.HTTP_409_CONFLICT)
elif trip.is_finished:
                return Response({"status": "error", "errors": "This trip is finished but has no driver"},
status=status.HTTP_409_CONFLICT)
if trip.fleet not in driver.fleets.all():
return Response({"status": "error", "errors": "You are not a member in that fleet"},
status=status.HTTP_409_CONFLICT)
if Trip.objects.filter(driver=driver, is_finished=False).exists():
# TODO Redirect to page with current trip
return Response({"status": "error", "errors": "You have already accepted current trip"},
status=status.HTTP_409_CONFLICT)
# TODO Change 1 to static variable
            if trip.problem != 1:
                return Response({"status": "error", "errors": "The trip has a problem"},
                                status=status.HTTP_409_CONFLICT)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Build all possible graphs using a Randomised Stepwise Addition Order Algorithm w/ Branch and Bound.
# Usage...
# python -u permute_qpgraph.py 1> permute-std.log 2> permute-err.log
import xml.etree.ElementTree as ElemTree
import re
import sys
import csv
import glob
import copy
import itertools
# TODO this throws a warning...
# import matplotlib
# matplotlib.use('Agg')
import numpy as np
# import the clustering libraries
from scipy.cluster.hierarchy import linkage, fcluster
# use the Pathos library for improved multi-processing
import pathos.multiprocessing as mp
# import the custom modules
from pipeline_utils import *
from itertools import izip
from cStringIO import StringIO
from Bio import Phylo
from graph_tool import *
from graph_tool.topology import *
# TODO improve parsimony...
# on first pass, only allow non-admix insertion
# if can't be added, then send to back of list
# if it fails admix insertion (when its turn comes up again)
# then throw
class PermuteQpgraph:
# how many outliers should we allow before pruning a branch in graph space
MAX_OUTLIER_THRESHOLD = 0
# print PDFs for graphs with (N - offset) nodes
REMAINING_PRINT_OFFSET = 0
def __init__(self, par_file, log_file, dot_path, pdf_path, nodes, outgroup, exhaustive, verbose, nthreads):
"""
Initialise the object attributes
"""
self.par_file = par_file
self.dot_path = dot_path
self.pdf_path = pdf_path
self.verbose = verbose
self.nthreads = nthreads
# should we try all possible graphs, or should we stop when we find something reasonable
self.exhaustive_search = exhaustive
# open the file for writing
self.log_handle = open(log_file, 'a')
if outgroup in nodes:
nodes.remove(outgroup)
self.nodes = nodes
self.outgroup = outgroup
self.root_node = 'R'
self.problem_nodes = []
self.tested_graphs = set()
self.solutions = set()
def log(self, message):
"""
Handle message logging to file/stdout.
"""
# send message to the log file
print >> self.log_handle, message
self.log_handle.flush()
if self.verbose:
# echo to stdout
print message
sys.stdout.flush()
def recurse_tree(self, root_tree, new_tag, remaining, depth=0):
"""
Permute all possible new trees, by adding the new node to all branches.
If no resulting tree passes the outlier threshold then try adding the node to all possible pairs of branches.
"""
new_trees = []
# get all the nodes in the tree (skip the outgroup)
target_nodes = [node for node in root_tree.findall('.//*') if node.tag != self.outgroup]
# add the new node to every branch in the tree
for target_node in target_nodes:
# clone the current tree and add the new node
new_tree = copy.deepcopy(root_tree)
self.insert_node(new_tree, target_node, new_tag)
new_trees.append(new_tree)
# test all the trees
results = self.test_trees(new_trees, depth)
# process the results
node_placed = self.check_results(results, remaining, depth)
# test all the admixture possibilities
if not node_placed:
admix_trees = []
# permute all the two parent admixture possibilities
pairs = list(itertools.combinations(target_nodes, 2))
for target1, target2 in pairs:
# skip duplicate targets (this happens when there is already an admixture node in the tree)
if target1.tag == target2.tag:
continue
# clone the current tree
new_tree = copy.deepcopy(root_tree)
# make a new intermediate node
admix_label = self.new_label(new_tree, admix=True)
# add two admix nodes as the children of both targets
admix_nodes = [
self.insert_node(new_tree, target1, admix_label, attrs={'internal': '1', 'admix': '1', 'side': 'l'}),
self.insert_node(new_tree, target2, admix_label, attrs={'internal': '1', 'admix': '1', 'side': 'r'})
]
# choose the actual parent based on the sort order of the tag name (needed for unique tree hashing)
admix_node = admix_nodes[0] if target1.tag < target2.tag else admix_nodes[1]
# add the new node as the child of the preferred admix node
self.insert_node(new_tree, admix_node, new_tag, append=True)
admix_trees.append(new_tree)
# test all the admixture trees
results = self.test_trees(admix_trees, depth)
# process the results
node_placed = self.check_results(results, remaining, depth)
if not node_placed:
# we could not place the node via either method :(
if new_tag not in self.problem_nodes and remaining and not self.exhaustive_search:
self.log("WARNING: Unable to place node '%s' at this time." % new_tag)
self.problem_nodes.append(new_tag)
# add the problem node to end of the list, as we may be able to add it later on
remaining.append(new_tag)
# try and add the other nodes
self.recurse_tree(root_tree, remaining[0], remaining[1:], depth)
else:
raise NodeUnplaceable("ERROR: Cannot place node '%s' in the graph." % new_tag)
def test_trees(self, new_trees, depth):
"""
Run qpGraph on a list of trees
"""
if self.nthreads > 1:
# we need to buffer the results to use multi-threading
pool = mp.ProcessingPool(self.nthreads)
results = pool.map(self.run_qpgraph, itertools.izip(new_trees, itertools.repeat(depth)))
else:
# test the trees without multi-threading
results = []
for new_tree in new_trees:
result = self.run_qpgraph((new_tree, depth))
results.append(result)
return results
def check_results(self, results, remaining, depth):
"""
Check the results from qpGraph
"""
# were we able to place the new node
placed_node = False
for new_tree, outliers, graph_name in results:
# add this graph to the list of those we've tested
self.tested_graphs.add(graph_name)
# did our new trees pass the threshold
if len(outliers) <= self.MAX_OUTLIER_THRESHOLD:
# recursively add any remaining nodes
if remaining:
self.recurse_tree(new_tree, remaining[0], remaining[1:], depth + 1)
else:
self.log("SUCCESS: Placed all nodes on a graph without outliers!")
# add this graph to the list of solutions
self.solutions.add(graph_name)
# we successfully placed the new node!
placed_node = True
return placed_node
def insert_node(self, new_tree, target_node, new_tag, attrs=None, append=False):
"""
Helper function to add a new node on the branch leading to the target node.
"""
# get the target node in the new tree
target_xpath = './/' + target_node.tag
if target_node.get('side'):
target_xpath += '[@side="%s"]' % target_node.get('side')
target_node = new_tree.find(target_xpath)
if append:
# append the new node directly to the target
parent_node = target_node
else:
# get the parent of the target
parent_node = new_tree.find(target_xpath + '/..')
# does the target node have a sibling
if len(parent_node) > 1:
label = self.new_label(new_tree)
parent_node.remove(target_node)
# add an intermediate node, to act as the parent for the new node
parent_node = ElemTree.SubElement(parent_node, label)
parent_node.set('internal', '1')
# re add the target node
# ElemTree.SubElement(parent_node, target_node.tag)
parent_node.append(target_node)
# add the new node as a sibling to the target
new_node = ElemTree.SubElement(parent_node, new_tag)
if attrs:
# add any node attributes
for key, value in attrs.iteritems():
new_node.set(key, value)
return new_node
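    # Worked example (sketch) of what insert_node does when the target already
    # has a sibling: adding new node 'C' on the branch leading to 'B' in
    #   <R><A/><B/></R>
    # produces
    #   <R><A/><N1 internal="1"><B/><C/></N1></R>
    # where 'N1' is whatever new_label() returned for the intermediate node.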
def run_qpgraph(self, args):
"""
Run qpGraph on the given tree
"""
# extract the tuple of arguments
new_tree, depth = args
# convert the tree to newick format
newick = self.print_newick_tree(new_tree)
# get unique names for the output files
graph_name = self.hash_text(newick)
grp_file = self.dot_path + '-{name}.graph'.format(name=graph_name)
dot_file = self.dot_path + '-{name}.dot'.format(name=graph_name)
log_file = self.dot_path + '-{name}.log'.format(name=graph_name)
xml_file = self.dot_path + '-{name}.xml'.format(name=graph_name)
try:
# if the log file exists then we've run the analysis already
with open(log_file, 'r') as fin:
log = fin.read()
except IOError:
# save the xml file
new_tree.write(xml_file)
# convert the tree to qpGraph format
graph = self.export_qpgraph(new_tree)
# save the graph file
with open(grp_file, 'w') as fout:
fout.write(graph)
# run qpGraph
log = run_cmd(["qpGraph", "-p", self.par_file, "-g", grp_file, "-d", dot_file], verbose=False)
# save the log file
with open(log_file, 'w') as fout:
fout.write(log)
# parse the log and extract the outliers
outliers, worst_fstat = self.extract_outliers(log.splitlines())
# count the leaf nodes
all_nodes = new_tree.findall('.//*')
num_nodes = len([node for node in all_nodes if node.get('internal') != '1'])
num_admix = len([node for node in all_nodes if node.get('admix') == '1']) / 2
num_outliers = len(outliers)
# only print PDFs for graphs that pass the threshold
if num_outliers <= self.MAX_OUTLIER_THRESHOLD and num_nodes > (len(self.nodes) - self.REMAINING_PRINT_OFFSET):
# embed some useful metadata info in the PDF name
pdf_file = self.pdf_path + '-n{nodes}-o{out}-a{admix}-{name}.pdf'.format(nodes=num_nodes,
out=num_outliers,
admix=num_admix,
name=graph_name)
# pretty print the qpGraph dot file
pprint_qpgraph(dot_file, pdf_file)
# output some summary stats
self.log("{padding}{tree} \tnodes={nodes}\t admix={admix}\t outliers={out}\t worst={worst}\t {name}".format(
padding=" "*depth, name=graph_name, tree=newick.ljust(80), nodes=num_nodes, admix=num_admix,
out=len(outliers), worst=worst_fstat[-1]))
return new_tree, outliers, graph_name
@staticmethod
def extract_outliers(log):
"""
Parse the log file and extract the outliers
"""
outliers = []
read_log = False
worst_fstat = []
for line in log:
if 'outliers' in line:
read_log = True
continue
elif 'worst f-stat' in line:
worst_fstat = line.split()
read_log = False
continue
if read_log and len(line.strip()) > 0:
# save all the outliers
outliers.append(line.split())
return outliers, worst_fstat
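    # Sketch of the qpGraph log fragment this parser expects (inferred from the
    # parsing logic above, not from qpGraph documentation): every non-blank line
    # between a line containing 'outliers' and a line containing 'worst f-stat'
    # is split on whitespace and stored as one outlier row; the 'worst f-stat'
    # line itself is split and its last field is later reported as the worst
    # statistic.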
def export_qpgraph(self, root_tree):
"""
Convert the ElementTree into qpGraph format
"""
# clone the tree because this process is destructive
local_tree = copy.deepcopy(root_tree)
graph = "root\t{root}\n".format(root=self.root_node)
# Copyright (c) 2012-2020 Jicamarca Radio Observatory
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
"""API to create signal chain projects
The API is provide through class: Project
"""
import re
import sys
import ast
import os
import datetime
import traceback
import time
import multiprocessing
from multiprocessing import Process, Queue
from threading import Thread
from xml.etree.ElementTree import ElementTree, Element, SubElement
from schainpy.admin import Alarm, SchainWarning
from schainpy.model import *
from schainpy.utils import log
if 'darwin' in sys.platform and sys.version_info[0] == 3 and sys.version_info[1] > 7:
multiprocessing.set_start_method('fork')
class ConfBase():
def __init__(self):
self.id = '0'
self.name = None
self.priority = None
self.parameters = {}
self.object = None
self.operations = []
def getId(self):
return self.id
def getNewId(self):
return int(self.id) * 10 + len(self.operations) + 1
def updateId(self, new_id):
self.id = str(new_id)
n = 1
for conf in self.operations:
conf_id = str(int(new_id) * 10 + n)
conf.updateId(conf_id)
n += 1
def getKwargs(self):
params = {}
for key, value in self.parameters.items():
if value not in (None, '', ' '):
params[key] = value
return params
def update(self, **kwargs):
for key, value in kwargs.items():
self.addParameter(name=key, value=value)
def addParameter(self, name, value, format=None):
'''
'''
if isinstance(value, str) and re.search(r'(\d+/\d+/\d+)', value):
self.parameters[name] = datetime.date(*[int(x) for x in value.split('/')])
elif isinstance(value, str) and re.search(r'(\d+:\d+:\d+)', value):
self.parameters[name] = datetime.time(*[int(x) for x in value.split(':')])
else:
try:
self.parameters[name] = ast.literal_eval(value)
except:
if isinstance(value, str) and ',' in value:
self.parameters[name] = value.split(',')
else:
self.parameters[name] = value
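    # Examples (sketch) of how addParameter coerces string values:
    #   '2021/05/01' -> datetime.date(2021, 5, 1)
    #   '06:30:00'   -> datetime.time(6, 30)
    #   '[1, 2, 3]'  -> [1, 2, 3]        (ast.literal_eval succeeds)
    #   'ch0,ch1'    -> ['ch0', 'ch1']   (literal_eval fails, comma fallback)
    #   'plain text' -> 'plain text'     (kept as a string)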
def getParameters(self):
params = {}
for key, value in self.parameters.items():
s = type(value).__name__
if s == 'date':
params[key] = value.strftime('%Y/%m/%d')
elif s == 'time':
params[key] = value.strftime('%H:%M:%S')
else:
params[key] = str(value)
return params
def makeXml(self, element):
xml = SubElement(element, self.ELEMENTNAME)
for label in self.xml_labels:
xml.set(label, str(getattr(self, label)))
for key, value in self.getParameters().items():
xml_param = SubElement(xml, 'Parameter')
xml_param.set('name', key)
xml_param.set('value', value)
for conf in self.operations:
conf.makeXml(xml)
def __str__(self):
if self.ELEMENTNAME == 'Operation':
s = ' {}[id={}]\n'.format(self.name, self.id)
else:
s = '{}[id={}, inputId={}]\n'.format(self.name, self.id, self.inputId)
for key, value in self.parameters.items():
if self.ELEMENTNAME == 'Operation':
s += ' {}: {}\n'.format(key, value)
else:
s += ' {}: {}\n'.format(key, value)
for conf in self.operations:
s += str(conf)
return s
class OperationConf(ConfBase):
ELEMENTNAME = 'Operation'
xml_labels = ['id', 'name']
def setup(self, id, name, priority, project_id, err_queue):
self.id = str(id)
self.project_id = project_id
self.name = name
self.type = 'other'
self.err_queue = err_queue
def readXml(self, element, project_id, err_queue):
self.id = element.get('id')
self.name = element.get('name')
self.type = 'other'
self.project_id = str(project_id)
self.err_queue = err_queue
for elm in element.iter('Parameter'):
self.addParameter(elm.get('name'), elm.get('value'))
def createObject(self):
className = eval(self.name)
if 'Plot' in self.name or 'Writer' in self.name or 'Send' in self.name or 'print' in self.name:
kwargs = self.getKwargs()
opObj = className(self.id, self.id, self.project_id, self.err_queue, **kwargs)
opObj.start()
self.type = 'external'
else:
opObj = className()
self.object = opObj
return opObj
class ProcUnitConf(ConfBase):
ELEMENTNAME = 'ProcUnit'
xml_labels = ['id', 'inputId', 'name']
def setup(self, project_id, id, name, datatype, inputId, err_queue):
'''
'''
if datatype == None and name == None:
raise ValueError('datatype or name should be defined')
if name == None:
if 'Proc' in datatype:
name = datatype
else:
name = '%sProc' % (datatype)
if datatype == None:
datatype = name.replace('Proc', '')
self.id = str(id)
self.project_id = project_id
self.name = name
self.datatype = datatype
self.inputId = inputId
self.err_queue = err_queue
self.operations = []
self.parameters = {}
def removeOperation(self, id):
i = [1 if x.id==id else 0 for x in self.operations]
self.operations.pop(i.index(1))
def getOperation(self, id):
for conf in self.operations:
if conf.id == id:
return conf
def addOperation(self, name, optype='self'):
'''
'''
id = self.getNewId()
conf = OperationConf()
conf.setup(id, name=name, priority='0', project_id=self.project_id, err_queue=self.err_queue)
self.operations.append(conf)
return conf
def readXml(self, element, project_id, err_queue):
self.id = element.get('id')
self.name = element.get('name')
self.inputId = None if element.get('inputId') == 'None' else element.get('inputId')
self.datatype = element.get('datatype', self.name.replace(self.ELEMENTNAME.replace('Unit', ''), ''))
self.project_id = str(project_id)
self.err_queue = err_queue
self.operations = []
self.parameters = {}
for elm in element:
if elm.tag == 'Parameter':
self.addParameter(elm.get('name'), elm.get('value'))
elif elm.tag == 'Operation':
conf = OperationConf()
conf.readXml(elm, project_id, err_queue)
self.operations.append(conf)
def createObjects(self):
'''
        Instantiate the processing units.
'''
className = eval(self.name)
kwargs = self.getKwargs()
procUnitObj = className()
procUnitObj.name = self.name
log.success('creating process...', self.name)
for conf in self.operations:
opObj = conf.createObject()
log.success('adding operation: {}, type:{}'.format(
conf.name,
conf.type), self.name)
procUnitObj.addOperation(conf, opObj)
self.object = procUnitObj
def run(self):
'''
'''
return self.object.call(**self.getKwargs())
class ReadUnitConf(ProcUnitConf):
ELEMENTNAME = 'ReadUnit'
def __init__(self):
self.id = None
self.datatype = None
self.name = None
self.inputId = None
self.operations = []
self.parameters = {}
def setup(self, project_id, id, name, datatype, err_queue, path='', startDate='', endDate='',
startTime='', endTime='', server=None, **kwargs):
if datatype == None and name == None:
raise ValueError('datatype or name should be defined')
if name == None:
if 'Reader' in datatype:
name = datatype
datatype = name.replace('Reader','')
else:
name = '{}Reader'.format(datatype)
if datatype == None:
if 'Reader' in name:
datatype = name.replace('Reader','')
else:
datatype = name
name = '{}Reader'.format(name)
self.id = id
self.project_id = project_id
self.name = name
self.datatype = datatype
self.err_queue = err_queue
self.addParameter(name='path', value=path)
self.addParameter(name='startDate', value=startDate)
self.addParameter(name='endDate', value=endDate)
self.addParameter(name='startTime', value=startTime)
self.addParameter(name='endTime', value=endTime)
for key, value in kwargs.items():
self.addParameter(name=key, value=value)
class Project(Process):
"""API to create signal chain projects"""
ELEMENTNAME = 'Project'
def __init__(self, name=''):
Process.__init__(self)
self.id = '1'
if name:
self.name = '{} ({})'.format(Process.__name__, name)
self.filename = None
self.description = None
self.email = None
self.alarm = []
self.configurations = {}
# self.err_queue = Queue()
self.err_queue = None
self.started = False
def getNewId(self):
idList = list(self.configurations.keys())
id = int(self.id) * 10
while True:
id += 1
if str(id) in idList:
continue
break
return str(id)
def updateId(self, new_id):
self.id = str(new_id)
keyList = list(self.configurations.keys())
keyList.sort()
n = 1
new_confs = {}
for procKey in keyList:
conf = self.configurations[procKey]
idProcUnit = str(int(self.id) * 10 + n)
conf.updateId(idProcUnit)
new_confs[idProcUnit] = conf
n += 1
self.configurations = new_confs
def setup(self, id=1, name='', description='', email=None, alarm=[]):
self.id = str(id)
self.description = description
self.email = email
self.alarm = alarm
if name:
self.name = '{} ({})'.format(Process.__name__, name)
def update(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def clone(self):
p = Project()
p.id = self.id
p.name = self.name
p.description = self.description
p.configurations = self.configurations.copy()
return p
def addReadUnit(self, id=None, datatype=None, name=None, **kwargs):
'''
'''
if id is None:
idReadUnit = self.getNewId()
else:
idReadUnit = str(id)
conf = ReadUnitConf()
conf.setup(self.id, idReadUnit, name, datatype, self.err_queue, **kwargs)
self.configurations[conf.id] = conf
return conf
def addProcUnit(self, id=None, inputId='0', datatype=None, name=None):
'''
'''
if id is None:
idProcUnit = self.getNewId()
else:
idProcUnit = id
conf = ProcUnitConf()
conf.setup(self.id, idProcUnit, name, datatype, inputId, self.err_queue)
self.configurations[conf.id] = conf
return conf
def removeProcUnit(self, id):
if id in self.configurations:
self.configurations.pop(id)
def getReadUnit(self):
for obj in list(self.configurations.values()):
if obj.ELEMENTNAME == 'ReadUnit':
return obj
return None
def getProcUnit(self, id):
return self.configurations[id]
def getUnits(self):
keys = list(self.configurations)
keys.sort()
for key in keys:
yield self.configurations[key]
def updateUnit(self, id, **kwargs):
        self.configurations[id].update(**kwargs)
def makeXml(self):
xml = Element('Project')
xml.set('id', str(self.id))
xml.set('name', self.name)
xml.set('description', self.description)
for conf in self.configurations.values():
conf.makeXml(xml)
self.xml = xml
def writeXml(self, filename=None):
if filename == None:
if self.filename:
filename = self.filename
else:
filename = 'schain.xml'
if not filename:
            print('filename has not been defined. Use setFilename(filename) to define it.')
return 0
abs_file = os.path.abspath(filename)
if not os.access(os.path.dirname(abs_file), os.W_OK):
print('No write permission on %s' % os.path.dirname(abs_file))
return 0
if os.path.isfile(abs_file) and not(os.access(abs_file, os.W_OK)):
            print('File %s already exists and it could not be overwritten' % abs_file)
return 0
self.makeXml()
ElementTree(self.xml).write(abs_file, method='xml')
self.filename = abs_file
return 1
def readXml(self, filename):
abs_file = os.path.abspath(filename)
self.configurations = {}
try:
self.xml = ElementTree().parse(abs_file)
except:
log.error('Error reading %s, verify file format' % filename)
return 0
self.id = self.xml.get('id')
self.name = self.xml.get('name')
self.description = self.xml.get('description')
for element in self.xml:
if element.tag == 'ReadUnit':
conf = ReadUnitConf()
conf.readXml(element, self.id, self.err_queue)
self.configurations[conf.id] = conf
elif element.tag == 'ProcUnit':
conf = ProcUnitConf()
input_proc = self.configurations[element.get('inputId')]
conf.readXml(element, self.id, self.err_queue)
self.configurations[conf.id] = conf
self.filename = abs_file
return 1
def __str__(self):
text = '\nProject[id=%s, name=%s, description=%s]\n\n' % (
self.id,
self.name,
self.description,
)
for conf in self.configurations.values():
text += '{}'.format(conf)
return text
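# Illustrative sketch of driving this API (the reader/operation names used
# below are assumptions; the names actually available depend on the classes
# exported by schainpy.model):
#
#     prj = Project()
#     prj.setup(id='100', name='example', description='minimal signal chain')
#     reader = prj.addReadUnit(datatype='Voltage', path='/data',
#                              startDate='2020/01/01', endDate='2020/01/02',
#                              startTime='00:00:00', endTime='23:59:59')
#     proc = prj.addProcUnit(datatype='Voltage', inputId=reader.getId())
#     op = proc.addOperation(name='ProfileSelector', optype='other')
#     op.addParameter(name='profileRangeList', value='0,127')
#     prj.writeXml('example.xml')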
= "127.0.0.1")
type: str
targetPort:
desc:
Target port of the server socket link. (Default = 27412)
type: int
listenIP:
desc:
IP address of the client application. (Default is
"127.0.0.1")
type: str
listenPort:
desc:
Listen port of the client socket link. (Default = 27413)
"""
# Check whether all keyword arguments are None.
if (targetIP is None) or (targetPort is None) or (listenIP is None) or (listenPort is None):
# Use the default values as set in the API.
r = etapi.Open(ctypes.c_char_p(appKey.encode("utf-8")))
else:
# Use the user-defined values.
r = etapi.Open(ctypes.c_char_p(appKey.encode("utf-8")), \
ctypes.c_char_p(targetIP.encode("utf-8")), ctypes.c_int32(targetPort), \
ctypes.c_char_p(listenIP.encode("utf-8")), ctypes.c_int32(listenPort))
# Check the result.
if not check_result(r):
self._error(r)
def IsOpen(self):
"""
desc:
Checks whether the API is open (but not whether the server is up
and running).
returns:
desc: True if the API is open, and False if not.
type: bool
"""
# Make a call to the API, and save the result in a variable.
is_open = ctypes.c_bool()
r = etapi.IsOpen(ctypes.byref(is_open))
# Check the result.
if check_result(r):
return is_open.value
else:
self._error(r)
def RecordData(self, start):
"""
desc:
Starts or stops data recording. For the time being (2021-01-29),
the location of the output data file is in user’s documents
folder, e.g.
C:\\User\\alea_technologies_gmbh\\IntelliGazeServer\\data\\Exp1\\User1\\
arguments:
start:
desc:
True if the recording to the Alea data file should start,
and False if the recording should stop.
type: bool
"""
# Make a call to the API.
r = etapi.RecordData(ctypes.c_bool(start))
# Check the result.
if not check_result(r):
self._error(r)
def SendTrigger(self, message):
"""
desc:
Sends a trigger message. If data recording is in progress, the
message will be recorded as well. Usually such messages are used
to separate trials within an experiment, and to record events such
as stimulus onsets/offsets, responses, etc.
arguments:
            message:
desc:
The message that should be recorded in the data file.
type: str
"""
# Record the message to the data file.
r = etapi.SendTrigger(ctypes.c_char_p(message.encode("utf-8")))
# Check the result.
if not check_result(r):
self._error(r)
def WaitForData(self, timeOutMilliseconds):
"""
desc:
Blocks until the next sample comes in. It is not recommended to
use this function for data streaming. Use Sample instead.
arguments:
timeOutMilliseconds:
desc: Timeout in milliseconds. This function will return
on obtaining a sample or on timing out.
type: int
returns:
desc: The latest AleaData when it becomes available. This is a
CAleaData struct, or None if a timeout occurred.
type: ctypes.Structure
"""
# Create a sample struct to write incoming data to.
sample = CAleaData()
dwMilliseconds = ctypes.c_int32(timeOutMilliseconds)
# Make a call to the API, and save the result in a variable.
r = etapi.WaitForData(ctypes.byref(sample), dwMilliseconds)
# Check if the result is a timeout.
if r == -1:
# Set the sample to None.
sample = None
# Catch any other errors.
else:
if not check_result(r):
self._error(r)
return sample
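    # Illustrative polling sketch (``tracker`` stands for an instance of this
    # wrapper class; only methods defined in this file are used):
    #
    #     tracker.RecordData(True)
    #     for i in range(100):
    #         sample = tracker.WaitForData(500)   # 500 ms timeout
    #         if sample is None:
    #             continue                        # timed out, try again
    #         tracker.SendTrigger('sample %d' % i)
    #     tracker.RecordData(False)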
def Close(self):
"""
desc:
Closes the API, releases the socket connection, and frees API
            resources. Call close before quitting the client application!
"""
# Make a call to the API, and save the result in a variable.
r = etapi.Close()
# In the C API wrapper, the Close function doesn't actually return
# anything. Instead, it raises a warning about a blocking operation
# being interrupted by a call to WSACancelBlockingCall. Thus, the
# result is likely to be 1. We'll ignore this locally.
if r == 1:
return
# Check the result.
if not check_result(r):
self._error(r)
def Version(self):
"""
desc:
Returns the major.minor.build version and the device type. The
device type is coded 0 for IG30 systems, and 1 for IG15 systems.
returns:
desc: The version and device in a single string, formatted
"major.minor.build.device"
type: str
"""
# Make a call to the API, and save the result in a variable.
major = ctypes.c_int32()
minor = ctypes.c_int32()
build = ctypes.c_int32()
device = ctypes.c_int32()
r = etapi.Version(ctypes.byref(major), ctypes.byref(minor), \
ctypes.byref(build), ctypes.byref(device))
# Convert to string.
version = "{}.{}.{}.{}".format( \
major.value, minor.value, build.value, device.value)
# Check the result.
if check_result(r):
return version
else:
self._error(r)
def PerformCalibration(self, noPoints=9, location=0, \
randomizePoints=True, slowMode=False, audioFeedback=True, eye=0, \
calibrationImprovement=False, skipBadPoints=False, \
autoCalibration=True, backColor=(127,127,127), pointColor=(0,0,0), \
imageName=""):
"""
desc:
Performs an eye-tracker-controlled calibration: the tracker will
            autonomously run through the calibration process, including the
displaying of calibration points. The CalibrationDoneDelegate and
ResultCalibrationExCB callbacks will be called when the
calibration is finished or when an error occurs.
keywords:
noPoints:
desc:
Number of points used in the calibration. Choose from 1, 5,
9, or 16. (Default = 9)
type: int
location:
desc:
Indication of where the calibration points should be
presented. Choose from 0 (Full, outer points are 5% off
the monitor edge), 1 (Center, outer points are 20% off
the monitor edge), 2 (Bottom, points are in the lower half
of the monitor), 3 (Horizontal, points are located in a
horizontal line), and 4 (Vertical, points are located in
a vertical line). (Default = 0)
type: int
randomizePoints:
desc:
Set to True to allow the tracker to randomise the order in
which calibration points are shown. Some experienced users
have a tendency to anticipate where points will be shown,
and to produce a systematic calibration error by moving
their eyes to the next point too quickly. Shuffling the
points prevents this. (Default = True)
type: bool
slowMode:
desc:
Set to True to allow the tracker to show big and slow
calibration targets. (Default = False)
type: bool
audioFeedback:
desc:
Set to True to allow the tracker to play a sound when the
point jumps to a new position. (Default = True)
type: bool
eye:
desc:
Determines what eyes to calibrate and what eyes to track.
Choose from 0 (calibrate both eyes), 1 (calibrate the left
eye and track both eyes, "right glass eye"), 2 (calibrate
the right eye and track both eyes, "left glass eye"), 3
(calibrate and track only the left eye, "right pirate
eye"), or 4 (calibrate and track only the right eye, "left
pirate eye"). (Default = 0)
type: int
calibrationImprovement:
desc:
Set to True if outliers or skipped points from a previous
calibrations should be re-calibrated. Can only be done
when a previous calibration returned with an "Improvement"
suggestion! (Default = False)
type: bool
skipBadPoints:
desc:
When set to True, IntelliGaze will not get stuck at
uncalibratable points. It will skip them, and try to
complete the calibration without them. (Default = False)
type: bool
autoCalibration:
desc:
Set to True to allow the tracker to detect fixations and
accept points automatically. (Default = True)
type: bool
backColor:
desc:
RGB value of the background colour. This should have a
similar brightness to the experiment or application that
this calibration will be used with. The format is a tuple
with 3 ints in the range 0-255, representing red, green,
and blue. For example, (0,0,0) is black, and (255,255,255)
is white. (Default = (127,127,127) )
type: int
pointColor:
desc:
RGB value of the calibration point colour. This should
have a good contrast to the background. The format is a
tuple with 3 ints in the range 0-255, representing red,
green, and blue. For example, (255,0,0) is pure red.
(Default = (0,0,0) )
type: tuple
imageName:
desc:
Leave empty for the default circle, or provide a path to
an image file to use that image as calibration target. Use
the string "ANIMATION:PARROT" to calibrate with an
animation. (Default = "")
type: str
"""
# Convert the colours from RGB to 32-bit integer ARGB format.
alpha = 255 * 256 * 256 * 256
backColor = alpha + backColor[0] * 256 * 256 + backColor[1] * 256 \
            + backColor[2]
is 0.0001
:type theTolMax: float
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools_CorrectCurveOnSurface(*args)
def BOPTools_AlgoTools_CorrectPointOnCurve(*args):
"""
* Provides valid values of tolerances for the shape <theS> in terms of BRepCheck_InvalidPointOnCurve.
:param theS:
:type theS: TopoDS_Shape &
:param theTolMax: default value is 0.0001
:type theTolMax: float
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools_CorrectPointOnCurve(*args)
def BOPTools_AlgoTools_MakeNewVertex(*args):
"""
* Make a vertex using 3D-point <aP1> and 3D-tolerance value <aTol>
:param aP1:
:type aP1: gp_Pnt
:param aTol:
:type aTol: float
:param aNewVertex:
:type aNewVertex: TopoDS_Vertex &
:rtype: void
* Make a vertex using couple of vertices <aV1, aV2>
:param aV1:
:type aV1: TopoDS_Vertex &
:param aV2:
:type aV2: TopoDS_Vertex &
:param aNewVertex:
:type aNewVertex: TopoDS_Vertex &
:rtype: void
* Make a vertex in place of intersection between two edges <aE1, aE2> with parameters <aP1, aP2>
:param aE1:
:type aE1: TopoDS_Edge &
:param aP1:
:type aP1: float
:param aE2:
:type aE2: TopoDS_Edge &
:param aP2:
:type aP2: float
:param aNewVertex:
:type aNewVertex: TopoDS_Vertex &
:rtype: void
* Make a vertex in place of intersection between the edge <aE1> with parameter <aP1> and the face <aF2>
:param aE1:
:type aE1: TopoDS_Edge &
:param aP1:
:type aP1: float
:param aF2:
:type aF2: TopoDS_Face &
:param aNewVertex:
:type aNewVertex: TopoDS_Vertex &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools_MakeNewVertex(*args)
def BOPTools_AlgoTools_PointOnEdge(*args):
"""
* Compute a 3D-point on the edge <aEdge> at parameter <aPrm>
:param aEdge:
:type aEdge: TopoDS_Edge &
:param aPrm:
:type aPrm: float
:param aP:
:type aP: gp_Pnt
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools_PointOnEdge(*args)
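# Illustrative sketch (import paths are assumptions; in pythonocc-style builds
# these wrappers are usually reached through the OCC.* namespaces):
#
#     from OCC.gp import gp_Pnt
#     p = gp_Pnt()
#     BOPTools_AlgoTools_PointOnEdge(an_edge, 0.5, p)  # fills p in place
#     print(p.X(), p.Y(), p.Z())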
def BOPTools_AlgoTools_MakeSplitEdge(*args):
"""
* Make the edge from base edge <aE1> and two vertices <aV1,aV2> at parameters <aP1,aP2>
:param aE1:
:type aE1: TopoDS_Edge &
:param aV1:
:type aV1: TopoDS_Vertex &
:param aP1:
:type aP1: float
:param aV2:
:type aV2: TopoDS_Vertex &
:param aP2:
:type aP2: float
:param aNewEdge:
:type aNewEdge: TopoDS_Edge &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools_MakeSplitEdge(*args)
def BOPTools_AlgoTools_MakeSectEdge(*args):
"""
* Make the edge from 3D-Curve <aIC> and two vertices <aV1,aV2> at parameters <aP1,aP2>
:param aIC:
:type aIC: IntTools_Curve &
:param aV1:
:type aV1: TopoDS_Vertex &
:param aP1:
:type aP1: float
:param aV2:
:type aV2: TopoDS_Vertex &
:param aP2:
:type aP2: float
:param aNewEdge:
:type aNewEdge: TopoDS_Edge &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools_MakeSectEdge(*args)
def BOPTools_AlgoTools_UpdateVertex(*args):
"""
* Update the tolerance value for vertex <aV> taking into account the fact that <aV> lays on the curve <aIC>
:param aIC:
:type aIC: IntTools_Curve &
:param aT:
:type aT: float
:param aV:
:type aV: TopoDS_Vertex &
:rtype: void
* Update the tolerance value for vertex <aV> taking into account the fact that <aV> lies on the edge <aE>
:param aE:
:type aE: TopoDS_Edge &
:param aT:
:type aT: float
:param aV:
:type aV: TopoDS_Vertex &
:rtype: void
* Update the tolerance value for vertex <aVN> taking into account the fact that <aVN> should cover tolerance zone of <aVF>
:param aVF:
:type aVF: TopoDS_Vertex &
:param aVN:
:type aVN: TopoDS_Vertex &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools_UpdateVertex(*args)
def BOPTools_AlgoTools_CorrectRange(*args):
"""
* Correct shrunk range <aSR> taking into account 3D-curve resolution and corresp. tolerances' values of <aE1>, <aE2>
:param aE1:
:type aE1: TopoDS_Edge &
:param aE2:
:type aE2: TopoDS_Edge &
:param aSR:
:type aSR: IntTools_Range &
:param aNewSR:
:type aNewSR: IntTools_Range &
:rtype: void
* Correct shrunk range <aSR> taking into account 3D-curve resolution and corresp. tolerances' values of <aE>, <aF>
:param aE:
:type aE: TopoDS_Edge &
:param aF:
:type aF: TopoDS_Face &
:param aSR:
:type aSR: IntTools_Range &
:param aNewSR:
:type aNewSR: IntTools_Range &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools_CorrectRange(*args)
def BOPTools_AlgoTools_IsBlockInOnFace(*args):
"""
* Returns True if PaveBlock <aPB> lies on the face <aF>, i.e. the <aPB> is IN or ON in 2D of <aF>
:param aShR:
:type aShR: IntTools_Range &
:param aF:
:type aF: TopoDS_Face &
:param aE:
:type aE: TopoDS_Edge &
:param aContext:
:type aContext: Handle_BOPInt_Context &
:rtype: bool
"""
return _BOPTools.BOPTools_AlgoTools_IsBlockInOnFace(*args)
def BOPTools_AlgoTools_IsMicroEdge(*args):
"""
* Checks if it is possible to compute shrunk range for the edge <aE>.
:param theEdge:
:type theEdge: TopoDS_Edge &
:param theContext:
:type theContext: Handle_BOPInt_Context &
:rtype: bool
"""
return _BOPTools.BOPTools_AlgoTools_IsMicroEdge(*args)
def BOPTools_AlgoTools_CorrectShapeTolerances(*args):
"""
* Corrects tolerance values of the sub-shapes of the shape <theS> if needed.
:param theS:
:type theS: TopoDS_Shape &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools_CorrectShapeTolerances(*args)
def BOPTools_AlgoTools_Dimension(*args):
"""
* Returns the dimension of the shape <theS>.
:param theS:
:type theS: TopoDS_Shape &
:rtype: int
"""
return _BOPTools.BOPTools_AlgoTools_Dimension(*args)
def BOPTools_AlgoTools_IsOpenShell(*args):
"""
* Returns true if the shell <theShell> is open
:param theShell:
:type theShell: TopoDS_Shell &
:rtype: bool
"""
return _BOPTools.BOPTools_AlgoTools_IsOpenShell(*args)
def BOPTools_AlgoTools_IsInvertedSolid(*args):
"""
* Returns true if the solid <theSolid> is inverted
:param theSolid:
:type theSolid: TopoDS_Solid &
:rtype: bool
"""
return _BOPTools.BOPTools_AlgoTools_IsInvertedSolid(*args)
class BOPTools_AlgoTools2D(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
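# Static helpers for building and querying parametric (2D) curves of edges on
# faces; every method simply forwards to the underlying C++ implementation.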
def BuildPCurveForEdgeOnFace(*args):
"""
* Compute P-Curve for the edge <aE> on the face <aF>
:param aE:
:type aE: TopoDS_Edge &
:param aF:
:type aF: TopoDS_Face &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools2D_BuildPCurveForEdgeOnFace(*args)
BuildPCurveForEdgeOnFace = staticmethod(BuildPCurveForEdgeOnFace)
def EdgeTangent(*args):
"""
* Compute tangent for the edge <aE> [in 3D] at parameter <aT>
:param anE:
:type anE: TopoDS_Edge &
:param aT:
:type aT: float
:param Tau:
:type Tau: gp_Vec
:rtype: bool
"""
return _BOPTools.BOPTools_AlgoTools2D_EdgeTangent(*args)
EdgeTangent = staticmethod(EdgeTangent)
def PointOnSurface(*args):
"""
* Compute surface parameters <U,V> of the face <aF> for the point from the edge <aE> at parameter <aT>.
:param aE:
:type aE: TopoDS_Edge &
:param aF:
:type aF: TopoDS_Face &
:param aT:
:type aT: float
:param U:
:type U: float &
:param V:
:type V: float &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools2D_PointOnSurface(*args)
PointOnSurface = staticmethod(PointOnSurface)
def HasCurveOnSurface(*args):
"""
* Returns True if the edge <aE> has P-Curve <aC> on surface <aF> . [aFirst, aLast] - range of the P-Curve, [aToler] - reached tolerance. If the P-Curve does not exist, aC.IsNull()=True.
:param aE:
:type aE: TopoDS_Edge &
:param aF:
:type aF: TopoDS_Face &
:param aC:
:type aC: Handle_Geom2d_Curve &
:param aFirst:
:type aFirst: float &
:param aLast:
:type aLast: float &
:param aToler:
:type aToler: float &
:rtype: bool
* Returns True if the edge <aE> has P-Curve <aC> on surface <aF> . If the P-Curve does not exist, aC.IsNull()=True.
:param aE:
:type aE: TopoDS_Edge &
:param aF:
:type aF: TopoDS_Face &
:rtype: bool
"""
return _BOPTools.BOPTools_AlgoTools2D_HasCurveOnSurface(*args)
HasCurveOnSurface = staticmethod(HasCurveOnSurface)
def AdjustPCurveOnFace(*args):
"""
* Adjust P-Curve <aC2D> (3D-curve <C3D>) on surface <aF> .
:param aF:
:type aF: TopoDS_Face &
:param C3D:
:type C3D: Handle_Geom_Curve &
:param aC2D:
:type aC2D: Handle_Geom2d_Curve &
:param aC2DA:
:type aC2DA: Handle_Geom2d_Curve &
:rtype: void
* Adjust P-Curve <aC2D> (3D-curve <C3D>) on surface <aF> . [aT1, aT2] - range to adjust
:param aF:
:type aF: TopoDS_Face &
:param aT1:
:type aT1: float
:param aT2:
:type aT2: float
:param aC2D:
:type aC2D: Handle_Geom2d_Curve &
:param aC2DA:
:type aC2DA: Handle_Geom2d_Curve &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools2D_AdjustPCurveOnFace(*args)
AdjustPCurveOnFace = staticmethod(AdjustPCurveOnFace)
def IntermediatePoint(*args):
"""
* Compute intermediate value in between [aFirst, aLast] .
:param aFirst:
:type aFirst: float
:param aLast:
:type aLast: float
:rtype: float
* Compute intermediate value of parameter for the edge <anE>.
:param anE:
:type anE: TopoDS_Edge &
:rtype: float
"""
return _BOPTools.BOPTools_AlgoTools2D_IntermediatePoint(*args)
IntermediatePoint = staticmethod(IntermediatePoint)
def BuildPCurveForEdgeOnPlane(*args):
"""
:param theE:
:type theE: TopoDS_Edge &
:param theF:
:type theF: TopoDS_Face &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools2D_BuildPCurveForEdgeOnPlane(*args)
BuildPCurveForEdgeOnPlane = staticmethod(BuildPCurveForEdgeOnPlane)
def BuildPCurveForEdgesOnPlane(*args):
"""
:param theLE:
:type theLE: BOPCol_ListOfShape &
:param theF:
:type theF: TopoDS_Face &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools2D_BuildPCurveForEdgesOnPlane(*args)
BuildPCurveForEdgesOnPlane = staticmethod(BuildPCurveForEdgesOnPlane)
def Make2D(*args):
"""
* Make P-Curve <aC> for the edge <aE> on surface <aF> . [aFirst, aLast] - range of the P-Curve [aToler] - reached tolerance
:param aE:
:type aE: TopoDS_Edge &
:param aF:
:type aF: TopoDS_Face &
:param aC:
:type aC: Handle_Geom2d_Curve &
:param aFirst:
:type aFirst: float &
:param aLast:
:type aLast: float &
:param aToler:
:type aToler: float &
:rtype: void
"""
return _BOPTools.BOPTools_AlgoTools2D_Make2D(*args)
Make2D = staticmethod(Make2D)
def MakePCurveOnFace(*args):
"""
* Make P-Curve <aC> for the 3D-curve <C3D> on surface <aF> . [aToler] - reached tolerance
:param aF:
:type aF: TopoDS_Face &
:param C3D:
:type C3D: Handle_Geom_Curve &
:param | |
vec_shape = torch.zeros(1, 10).float().to(self.device)
v, j, t, _ = self.mano_layer(vec_pose, vec_shape)
v = v.squeeze(0)
return v
@staticmethod
def assemble_pose_vec(gt_idx, gt_pose, var_idx, var_pose):
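# Interleave the fixed (ground-truth) and optimizable pose segments, then
# restore the original per-joint ordering by sorting the combined index list.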
idx_tensor = torch.cat((torch.Tensor(gt_idx).long(), torch.Tensor(var_idx).long()))
pose_tensor = torch.cat((gt_pose, var_pose), dim=0)
pose_tensor = pose_tensor[torch.argsort(idx_tensor)]
return pose_tensor
@staticmethod
def transf_vectors(vectors, tsl, rot):
"""
vectors: [K, 3], tsl: [3, ], rot: [3, ]
return: [K, 3]
"""
rot_matrix = batch_rodrigues(rot.unsqueeze(0)).squeeze(0).reshape((3, 3))
vec = (rot_matrix @ vectors.T).T
vec = vec + tsl
return vec
def loss_fn(self, opt_val, const_val, ctrl_val, coef_val):
var_hand_pose_assembled = self.assemble_pose_vec(
const_val["hand_pose_gt_idx"],
const_val["hand_pose_gt_val"],
const_val["hand_pose_var_idx"],
opt_val["hand_pose_var_val"],
)
# dispatch hand var
vec_pose = var_hand_pose_assembled.unsqueeze(0)
if ctrl_val["optimize_hand_shape"]:
vec_shape = opt_val["hand_shape_var"].unsqueeze(0)
else:
vec_shape = const_val["hand_shape_gt"].unsqueeze(0)
if ctrl_val["optimize_hand_tsl"]:
vec_tsl = opt_val["hand_tsl_var"].unsqueeze(0)
else:
vec_tsl = const_val["hand_tsl_gt"].unsqueeze(0)
# rebuild hand
rebuild_verts, rebuild_joints, rebuild_transf, rebuild_full_pose = self.mano_layer(vec_pose, vec_shape)
# skel adaption
if ctrl_val["fhb"]:
adapt_joints, _ = self.adaptor(rebuild_verts)
adapt_joints = adapt_joints.transpose(1, 2)
rebuild_joints = rebuild_joints - adapt_joints[:, 9].unsqueeze(1)
rebuild_verts = rebuild_verts - adapt_joints[:, 9].unsqueeze(1)
rebuild_joints = rebuild_joints + vec_tsl
rebuild_verts = rebuild_verts + vec_tsl
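# Shift the per-joint homogeneous transforms by vec_tsl: the 4x4 matrix added
# below is zero except for its translation column (tx, ty, tz, 0).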
rebuild_transf = rebuild_transf + torch.cat(
[
torch.cat([torch.zeros(3, 3).to(self.device), vec_tsl.view(3, -1)], dim=1),
torch.zeros(1, 4).to(self.device),
],
dim=0,
)
rebuild_verts_squeezed = rebuild_verts.squeeze(0)
# rebuild anchor
rebuild_anchor = self.anchor_layer(rebuild_verts)
rebuild_anchor = rebuild_anchor.contiguous() # TENSOR[1, 32, 3]
rebuild_anchor = rebuild_anchor.squeeze(0) # TENSOR[32, 3]
anchor_pos = rebuild_anchor[const_val["indexed_anchor_id"]] # TENSOR[NVALID, 3]
# dispatch obj var
if ctrl_val["optimize_obj"]:
obj_verts = self.transf_vectors(
const_val["obj_verts_3d_can"],
opt_val["obj_tsl_var"],
opt_val["obj_rot_var"],
)
full_obj_verts = self.transf_vectors(
const_val["full_obj_verts_3d"],
opt_val["obj_tsl_var"],
opt_val["obj_rot_var"],
)
full_obj_normals = self.transf_vectors(
const_val["full_obj_normals"],
torch.zeros(3, dtype=torch.float, device=self.device),
opt_val["obj_rot_var"],
)
else:
obj_verts = const_val["obj_verts_3d_gt"]
full_obj_verts = const_val["full_obj_verts_3d"]
full_obj_normals = const_val["full_obj_normals"]
# contact loss
contact_loss = FieldLoss.contact_loss(
anchor_pos,
obj_verts[const_val["indexed_vertex_id"]],
const_val["indexed_anchor_elasti"],
const_val["indexed_elasti_k"],
)
# repulsion loss
repulsion_loss = FieldLoss.full_repulsion_loss(
rebuild_verts_squeezed,
full_obj_verts,
full_obj_normals,
query=coef_val["repulsion_query"],
threshold=coef_val["repulsion_threshold"],
)
if ctrl_val["optimize_hand_pose"]:
# get hand loss
quat_norm_loss = HandLoss.pose_quat_norm_loss(var_hand_pose_assembled)
var_hand_pose_normalized = normalize_quaternion(var_hand_pose_assembled)
pose_reg_loss = HandLoss.pose_reg_loss(
var_hand_pose_normalized[const_val["hand_pose_var_idx"]], const_val["hand_pose_init_val"]
)
b_axis, u_axis, l_axis = self.axis_layer(rebuild_joints, rebuild_transf)
angle_axis = quaternion_to_angle_axis(var_hand_pose_normalized.reshape((16, 4)))
angle_axis = angle_axis[1:, :] # ignore global rot [15, 3]
axis = angle_axis / torch.norm(angle_axis, dim=1, keepdim=True)
angle = torch.norm(angle_axis, dim=1, keepdim=False)
# limit angle
angle_limit_loss = HandLoss.rotation_angle_loss(angle)
joint_b_axis_loss = HandLoss.joint_b_axis_loss(b_axis, axis)
joint_u_axis_loss = HandLoss.joint_u_axis_loss(u_axis, axis)
joint_l_limit_loss = HandLoss.joint_l_limit_loss(l_axis, axis)
edge_loss = HandLoss.edge_len_loss(
rebuild_verts_squeezed, const_val["hand_edges"], const_val["static_edge_len"]
)
else:
quat_norm_loss = torch.Tensor([0.0]).to(self.device)
pose_reg_loss = torch.Tensor([0.0]).to(self.device)
angle_limit_loss = torch.Tensor([0.0]).to(self.device)
joint_b_axis_loss = torch.Tensor([0.0]).to(self.device)
joint_u_axis_loss = torch.Tensor([0.0]).to(self.device)
joint_l_limit_loss = torch.Tensor([0.0]).to(self.device)
edge_loss = torch.Tensor([0.0]).to(self.device)
# pose_reg_loss_to_zero = torch.Tensor([0.0]).to(self.device)
if ctrl_val["optimize_hand_shape"]:
shape_reg_loss = HandLoss.shape_reg_loss(opt_val["hand_shape_var"], const_val["hand_shape_init"])
else:
shape_reg_loss = torch.Tensor([0.0]).to(self.device)
if ctrl_val["optimize_hand_tsl"]:
hand_tsl_loss = HandLoss.hand_tsl_loss(opt_val["hand_tsl_var"], const_val["hand_tsl_init"])
else:
hand_tsl_loss = torch.Tensor([0.0]).to(self.device)
if ctrl_val["optimize_obj"]:
obj_transf_loss = ObjectLoss.obj_transf_loss(
opt_val["obj_tsl_var"], opt_val["obj_rot_var"], const_val["obj_tsl_init"], const_val["obj_rot_init"]
)
else:
obj_transf_loss = torch.Tensor([0.0]).to(self.device)
loss = (
# ============= HAND ANATOMICAL LOSS
1.0 * quat_norm_loss
+ 1.0 * angle_limit_loss
+ 1.0 * edge_loss
+ 0.1 * joint_b_axis_loss
+ 0.1 * joint_u_axis_loss
+ 0.1 * joint_l_limit_loss
# ============= ELAST POTENTIONAL ENERGY
+ coef_val["lambda_contact_loss"] * contact_loss
+ coef_val["lambda_repulsion_loss"] * repulsion_loss
# ============= OFFSET LOSS
+ 1.0 * pose_reg_loss
+ 1.0 * shape_reg_loss
+ 1.0 * hand_tsl_loss
+ 1.0 * obj_transf_loss
)
# debug: runtime viz
if self.runtime_vis:
if self.ctrl_val["optimize_obj"]:
full_obj_verts = self.transf_vectors(
self.const_val["full_obj_verts_3d"],
self.opt_val["obj_tsl_var"].detach(),
self.opt_val["obj_rot_var"].detach(),
)
else:
full_obj_verts = self.const_val["full_obj_verts_3d"]
if not ctrl_val["optimize_hand_pose"]:
b_axis, u_axis, l_axis = self.axis_layer(rebuild_joints, rebuild_transf) # mend this up
self.runtime_show(rebuild_verts, b_axis, u_axis, l_axis, rebuild_transf, full_obj_verts)
return (
loss,
{
"quat_norm_loss": quat_norm_loss.detach().cpu().item(),
"angle_limit_loss": angle_limit_loss.detach().cpu().item(),
"edge_loss": edge_loss.detach().cpu().item(),
"joint_b_axis_loss": joint_b_axis_loss.detach().cpu().item(),
"joint_u_axis_loss": joint_u_axis_loss.detach().cpu().item(),
"joint_l_limit_loss": joint_l_limit_loss.detach().cpu().item(),
"contact_loss": contact_loss.detach().cpu().item(),
"repulsion_loss": repulsion_loss.detach().cpu().item(),
"pose_reg_loss": pose_reg_loss.detach().cpu().item(),
"hand_tsl_loss": hand_tsl_loss.detach().cpu().item(),
"obj_transf_loss": obj_transf_loss.detach().cpu().item(),
},
)
def optimize(self, progress=False):
if progress:
bar = trange(self.n_iter, position=3)
bar_hand = trange(0, position=2, bar_format="{desc}")
bar_contact = trange(0, position=1, bar_format="{desc}")
bar_axis = trange(0, position=0, bar_format="{desc}")
else:
bar = range(self.n_iter)
loss = torch.Tensor([1000.0]).to(self.device)
loss_dict = {}
for _ in bar:
if self.optimizing:
self.optimizer.zero_grad()
loss, loss_dict = self.loss_fn(self.opt_val, self.const_val, self.ctrl_val, self.coef_val)
if self.optimizing:
loss.backward()
self.optimizer.step()
self.scheduler.step(loss)
if progress:
bar.set_description("TOTAL LOSS {:4e}".format(loss.item()))
try:
bar_hand.set_description(
colored("HAND_REGUL_LOSS: ", "yellow")
+ "QN={:.3e} PR={:.3e} EG={:.3e}".format(
loss_dict["quat_norm_loss"], # QN
loss_dict["pose_reg_loss"], # PR
loss_dict["edge_loss"], # Edge
)
)
except:
pass
try:
bar_contact.set_description(
colored("HO_CONTACT_LOSS: ", "blue")
+ "Conta={:.3e}, Repul={:.3e}, OT={:.3e}".format(
loss_dict["contact_loss"], # Conta
loss_dict["repulsion_loss"], # Repul
loss_dict["obj_transf_loss"], # OT
)
)
except:
pass
try:
bar_axis.set_description(
colored("ANGLE_LOSS: ", "cyan")
+ "AL={:.3e} JB={:.3e} JU={:.3e} JL={:.3e}".format(
loss_dict["angle_limit_loss"], # AL
loss_dict["joint_b_axis_loss"], # JB
loss_dict["joint_u_axis_loss"], # JU
loss_dict["joint_l_limit_loss"], # JL
)
)
except:
pass
return loss.item(), loss_dict
def recover_hand(self, squeeze_out=True):
vars_hand_pose_assembled = self.assemble_pose_vec(
self.const_val["hand_pose_gt_idx"],
self.const_val["hand_pose_gt_val"],
self.const_val["hand_pose_var_idx"],
self.opt_val["hand_pose_var_val"],
).detach()
vars_hand_pose_normalized = normalize_quaternion(vars_hand_pose_assembled)
vec_pose = vars_hand_pose_normalized.unsqueeze(0)
if self.ctrl_val["optimize_hand_shape"]:
vec_shape = self.opt_val["hand_shape_var"].detach().unsqueeze(0)
else:
vec_shape = self.const_val["hand_shape_gt"].unsqueeze(0)
if self.ctrl_val["optimize_hand_tsl"]:
vec_tsl = self.opt_val["hand_tsl_var"].detach().unsqueeze(0)
else:
vec_tsl = self.const_val["hand_tsl_gt"].unsqueeze(0)
device = vec_pose.device
rebuild_verts, rebuild_joints, rebuild_transf, rebuild_full_pose = self.mano_layer(vec_pose, vec_shape)
# skel adaption
if self.ctrl_val["fhb"]:
adapt_joints, _ = self.adaptor(rebuild_verts)
adapt_joints = adapt_joints.transpose(1, 2)
rebuild_joints = rebuild_joints - adapt_joints[:, 9].unsqueeze(1)
rebuild_verts = rebuild_verts - adapt_joints[:, 9].unsqueeze(1)
rebuild_verts = rebuild_verts + vec_tsl
rebuild_joints = rebuild_joints + vec_tsl
rebuild_transf = rebuild_transf + torch.cat(
[
torch.cat((torch.zeros((3, 3), device=device), vec_tsl.T), dim=1),
torch.zeros((1, 4), device=device),
],
dim=0,
)
if squeeze_out:
rebuild_verts, rebuild_joints, rebuild_transf = (
rebuild_verts.squeeze(0),
rebuild_joints.squeeze(0),
rebuild_transf.squeeze(0),
)
return rebuild_verts, rebuild_joints, rebuild_transf
def recover_hand_pose(self):
vars_hand_pose_assembled = self.assemble_pose_vec(
self.const_val["hand_pose_gt_idx"],
self.const_val["hand_pose_gt_val"],
self.const_val["hand_pose_var_idx"],
self.opt_val["hand_pose_var_val"],
).detach()
vars_hand_pose_normalized = normalize_quaternion(vars_hand_pose_assembled)
return vars_hand_pose_normalized
def recover_obj(self):
if self.ctrl_val["optimize_obj"]:
obj_verts = self.transf_vectors(
self.const_val["full_obj_verts_3d"],
self.opt_val["obj_tsl_var"].detach(),
self.opt_val["obj_rot_var"].detach(),
)
else:
obj_verts = self.const_val["full_obj_verts_3d"]
return obj_verts
def obj_rot_np(self):
if self.ctrl_val["optimize_obj"]:
res = self.opt_val["obj_rot_var"].detach().cpu().numpy()
return res
else:
raise RuntimeError("not optimizing obj, cannot get obj_rot")
def obj_tsl_np(self):
if self.ctrl_val["optimize_obj"]:
res = self.opt_val["obj_tsl_var"].detach().cpu().numpy()
return res
else:
raise RuntimeError("not optimizing obj, cannot get obj_tsl")
def runtime_show(self, hand_verts, b_axis, u_axis, l_axis, hand_transf, obj_verts):
has_rot = False
b_axis = b_axis.detach().cpu().squeeze(0).numpy()
u_axis = u_axis.detach().cpu().squeeze(0).numpy()
l_axis = l_axis.detach().cpu().squeeze(0).numpy()
hand_transf = hand_transf.detach().cpu().squeeze(0).numpy()
b_rot_ms = []
u_rot_ms = []
l_rot_ms = []
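# Blocking render loop: keep updating the Open3D window until it is closed.
# The axis arrows are rotated into place only once (guarded by has_rot).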
while True:
self.runtime_vis["hand_mesh"].vertices = o3d.utility.Vector3dVector(
np.array(hand_verts.detach().cpu().squeeze(0))
)
self.runtime_vis["hand_mesh"].compute_vertex_normals()
self.runtime_vis["obj_mesh"].vertices = o3d.utility.Vector3dVector(
np.array(obj_verts.detach().cpu().squeeze(0))
)
self.runtime_vis["obj_mesh"].compute_vertex_normals()
if not has_rot:
for i in range(16):
if not i:
continue
b_rot = caculate_align_mat(b_axis[i - 1])
b_rot_ms.append(b_rot)
self.runtime_vis["b_axis"][i] = self.runtime_vis["b_axis"][i].rotate(b_rot, center=(0, 0, 0))
self.runtime_vis["b_axis"][i] = self.runtime_vis["b_axis"][i].rotate(
hand_transf[i][:3, :3], center=(0, 0, 0)
)
self.runtime_vis["b_axis"][i] = self.runtime_vis["b_axis"][i].translate(hand_transf[i][:3, 3].T)
self.runtime_vis["window"].update_geometry(self.runtime_vis["b_axis"][i])
u_rot = caculate_align_mat(u_axis[i - 1])
u_rot_ms.append(u_rot)
self.runtime_vis["up_axis"][i] = self.runtime_vis["up_axis"][i].rotate(u_rot, center=(0, 0, 0))
self.runtime_vis["up_axis"][i] = self.runtime_vis["up_axis"][i].rotate(
hand_transf[i][:3, :3], center=(0, 0, 0)
)
self.runtime_vis["up_axis"][i] = self.runtime_vis["up_axis"][i].translate(hand_transf[i][:3, 3].T)
self.runtime_vis["window"].update_geometry(self.runtime_vis["up_axis"][i])
l_rot = caculate_align_mat(l_axis[i - 1])
l_rot_ms.append(l_rot)
self.runtime_vis["l_axis"][i] = self.runtime_vis["l_axis"][i].rotate(l_rot, center=(0, 0, 0))
self.runtime_vis["l_axis"][i] = self.runtime_vis["l_axis"][i].rotate(
hand_transf[i][:3, :3], center=(0, 0, 0)
)
self.runtime_vis["l_axis"][i] = self.runtime_vis["l_axis"][i].translate(hand_transf[i][:3, 3].T)
self.runtime_vis["window"].update_geometry(self.runtime_vis["l_axis"][i])
has_rot = True
self.runtime_vis["window"].update_geometry(self.runtime_vis["hand_mesh"])
self.runtime_vis["window"].update_geometry(self.runtime_vis["obj_mesh"])
self.runtime_vis["window"].update_renderer()
if not self.runtime_vis["window"].poll_events():
break
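# Undo the arrow transforms in reverse order (translate back, then apply the
# inverse rotations) so the cached geometries return to their canonical pose.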
for i in range(16):
if not i:
continue
self.runtime_vis["b_axis"][i] = self.runtime_vis["b_axis"][i].translate(-hand_transf[i][:3, 3].T)
self.runtime_vis["b_axis"][i] = self.runtime_vis["b_axis"][i].rotate(
hand_transf[i][:3, :3].T, center=(0, 0, 0)
)
self.runtime_vis["b_axis"][i] = self.runtime_vis["b_axis"][i].rotate(b_rot_ms[i - 1].T, center=(0, 0, 0))
self.runtime_vis["up_axis"][i] = self.runtime_vis["up_axis"][i].translate(-hand_transf[i][:3, 3].T)
self.runtime_vis["up_axis"][i] = self.runtime_vis["up_axis"][i].rotate(
hand_transf[i][:3, :3].T, center=(0, 0, 0)
)
self.runtime_vis["up_axis"][i] = self.runtime_vis["up_axis"][i].rotate(u_rot_ms[i - 1].T, center=(0, 0, 0))
self.runtime_vis["l_axis"][i] = self.runtime_vis["l_axis"][i].translate(-hand_transf[i][:3, 3].T)
self.runtime_vis["l_axis"][i] = self.runtime_vis["l_axis"][i].rotate(
hand_transf[i][:3, :3].T, center=(0, 0, 0)
)
self.runtime_vis["l_axis"][i] = self.runtime_vis["l_axis"][i].rotate(l_rot_ms[i - 1].T, center=(0, 0, 0))
return
def caculate_align_mat(vec):
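# Rotation matrix that aligns the +Z unit vector with `vec`, built with the
# Rodrigues formula R = I + [z x v]_x + [z x v]_x^2 / (1 + z . v); the
# anti-parallel case (z . v == -1) falls back to -I.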
vec = vec / np.linalg.norm(vec)
z_unit_Arr = np.array([0, 0, 1])
z_mat = np.array(
[
[0, -z_unit_Arr[2], z_unit_Arr[1]],
[z_unit_Arr[2], 0, -z_unit_Arr[0]],
[-z_unit_Arr[1], z_unit_Arr[0], 0],
]
)
z_c_vec = np.matmul(z_mat, vec)
z_c_vec_mat = np.array(
[
[0, -z_c_vec[2], z_c_vec[1]],
[z_c_vec[2], 0, -z_c_vec[0]],
[-z_c_vec[1], z_c_vec[0], 0],
]
)
if np.dot(z_unit_Arr, vec) == -1:
qTrans_Mat = -np.eye(3, 3)
elif np.dot(z_unit_Arr, vec) == 1:
qTrans_Mat = np.eye(3, 3)
else:
qTrans_Mat = np.eye(3, 3) + z_c_vec_mat + np.matmul(z_c_vec_mat, z_c_vec_mat) / (1 + np.dot(z_unit_Arr, vec))
return qTrans_Mat
def init_runtime_viz(
hand_verts_gt,
hand_verts_init,
obj_verts_gt,
hand_faces,
obj_verts_cur,
obj_faces_cur,
contact_info,
cam_extr=None,
):
hand_mesh_gt = o3d.geometry.TriangleMesh()
hand_mesh_gt.triangles = o3d.utility.Vector3iVector(hand_faces)
hand_mesh_gt.vertices = o3d.utility.Vector3dVector(hand_verts_gt)
hand_mesh_gt.vertex_colors = o3d.utility.Vector3dVector(np.array([[0.0, 0.0, 1.0]] * len(hand_verts_gt)))
hand_mesh_gt.compute_vertex_normals()
obj_mesh_gt = o3d.geometry.TriangleMesh()
obj_mesh_gt.triangles = o3d.utility.Vector3iVector(obj_faces_cur)
obj_mesh_gt.vertices = o3d.utility.Vector3dVector(obj_verts_gt)
obj_mesh_gt.vertex_colors = o3d.utility.Vector3dVector(np.array([[1.0, 0.0, 0.0]] * len(obj_verts_gt)))
obj_mesh_gt.compute_vertex_normals()
hand_mesh_cur = o3d.geometry.TriangleMesh()
hand_mesh_cur.triangles = o3d.utility.Vector3iVector(hand_faces)
obj_mesh = o3d.geometry.TriangleMesh()
obj_mesh.triangles = o3d.utility.Vector3iVector(obj_faces_cur)
obj_mesh.vertices = o3d.utility.Vector3dVector(obj_verts_cur)
obj_colors = create_vertex_color(contact_info, "contact_region")
obj_mesh.compute_vertex_normals()
obj_mesh.vertex_colors = o3d.utility.Vector3dVector(obj_colors)
vis_cur = o3d.visualization.VisualizerWithKeyCallback()
vis_cur.create_window(window_name="Runtime Hand", width=1080, height=1080)
vis_cur.add_geometry(obj_mesh)
vis_cur.add_geometry(hand_mesh_cur)
vis_cur.add_geometry(hand_mesh_gt)
vis_cur.add_geometry(obj_mesh_gt)
back_axis_list = []
up_axis_list = []
left_axis_list = []
for i in range(16):
b = o3d.geometry.TriangleMesh.create_arrow(
cylinder_radius=0.0015,
cone_radius=0.002,
cylinder_height=0.05,
cone_height=0.008,
resolution=20,
cylinder_split=4,
cone_split=1,
)
b.paint_uniform_color([45 / 255.0, 220 / 255.0, 190 / 255.0])
b.compute_vertex_normals()
vis_cur.add_geometry(b)
back_axis_list.append(b)
u = o3d.geometry.TriangleMesh.create_arrow(
cylinder_radius=0.0015,
cone_radius=0.002,
cylinder_height=0.04,
cone_height=0.008,
resolution=20,
cylinder_split=4,
cone_split=1,
)
u.paint_uniform_color([250 / 255.0, 100 / 255.0, 100 / 255.0])
u.compute_vertex_normals()
vis_cur.add_geometry(u)
up_axis_list.append(u)
l = o3d.geometry.TriangleMesh.create_arrow(
cylinder_radius=0.0015,
cone_radius=0.002,
cylinder_height=0.04,
cone_height=0.008,
resolution=20,
cylinder_split=4,
cone_split=1,
)
| |
OR GREATER THAN 130 DEG F.
*ERROR* TYPE 25 ********************************************************************************
THE INITIAL AIR WET-BULB TEMPERATURE IS LESS THAN 0 OR GREATER THAN THE DRY-BULB TEMPERATURE.
*ERROR* TYPE 26 ********************************************************************************
THE DESIGN DRY-BULB TEMPERATURE IS LESS THAN 40 OR GREATER THAN 100 DEG F.
*ERROR* TYPE 27 ********************************************************************************
THE DESIGN WET-BULB TEMPERATURE IS LESS THAN 40 OR
GREATER THAN THE DESIGN DRY-BULB TEMPERATURE.
*ERROR* TYPE 28 ********************************************************************************
THE VALUE ENTERED FOR THIS HEAD LOSS COEFFICIENT IS LESS THAN 0 OR GREATER THAN 1000.
*ERROR* TYPE 29 ********************************************************************************
THE NUMBER OF SUBSEGMENTS IN THIS LINE SEGMENT IS LESS THAN 1 OR GREATER THAN 1200.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 30 ********************************************************************************
THE TOTAL NUMBER OF LINE SUBSEGMENTS HAS EXCEEDED 1200.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 31 ********************************************************************************
AN IMPROPER SUBSEGMENT NUMBER HAS BEEN ENTERED AS A LIMIT FOR A STEADY STATE HEATING OR COOLING SOURCE.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 32 ********************************************************************************
THE START OF SIMULATION PERIOD IS LESS THAN 0 OR GREATER THAN 24 HRS. THE DEFAULT VALUE OF 17 HRS WILL BE USED.
*ERROR* TYPE 33 ********************************************************************************
THE DESIGN MONTH ENTERED IS LESS THAN 1 OR GREATER THAN 12. THE DEFAULT VALUE OF 7 WILL BE USED.
*ERROR* TYPE 34 ********************************************************************************
THE HEAT SINK THERMAL CONDUCTIVITY ENTERED IS LESS THAN 0.005 OR GREATER THAN 2.0 BTU/HR-FT-DEG. F.
*ERROR* TYPE 35 ********************************************************************************
THE HEAT SINK THERMAL DIFFUSIVITY ENTERED IS LESS THAN 0.005 OR GREATER THAN 1.0 SQ FT/HR.
*ERROR* TYPE 36 ********************************************************************************
THE MINUTES PORTION OF THE DESIGN TIME IS GREATER THAN 59. THE DEFAULT VALUE OF ZERO (0) WILL BE USED.
*ERROR* TYPE 37 ********************************************************************************
THE STACK HEIGHT OF THIS VENTILATION SHAFT IS LESS THAN -1000 OR GREATER THAN 1000 FEET.
*ERROR* TYPE 38 ********************************************************************************
THE AREA OF THIS VENTILATION SHAFT IS LESS THAN 3 OR GREATER THAN 3000 SQ FT.
*ERROR* TYPE 39 ********************************************************************************
THE PERIMETER OF THIS VENTILATION SHAFT IS LESS THAN 5 OR GREATER THAN 500 FEET.
*ERROR* TYPE 40 ********************************************************************************
THE LENGTH OF THIS VENTILATION SHAFT SEGMENT IS LESS THAN 0
OR GREATER THAN 2000 FEET.
*ERROR* TYPE 41 ********************************************************************************
THE AVERAGE NUMBER OF LOOPS ADJACENT TO EACH LOOP IN THE SENSE OF SHARING ONE OR MORE SECTIONS HAS BEEN EXCEEDED.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 42 ********************************************************************************
THE NUMBER OF SUBSEGMENTS IN THIS VENTILATION SHAFT IS LESS THAN 1 OR GREATER THAN 1600.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 43 ********************************************************************************
THE TOTAL NUMBER OF LINE AND VENT SHAFT SUBSEGMENTS IN THIS SYSTEM IS GREATER THAN 1600.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 44 ********************************************************************************
A CONTROLLED ZONE ( TYPE 1 ) MUST NOT CONTAIN A VENTILATION SHAFT.
*ERROR* TYPE 45 ********************************************************************************
THE NUMBER OF DATA POINTS FOR THIS SPEED VS TIME PROFILE IS LESS THAN 2 OR GREATER THAN 100.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 46 ********************************************************************************
THE TIME DATA POINTS HAVE BEEN ENTERED OUT OF ORDER OR HAVE A TIME SPAN GREATER THAN 1 DAY.
*ERROR* TYPE 47 ********************************************************************************
A TRAIN SPEED LESS THAN 0 OR GREATER THAN 250 MPH HAS BEEN ENTERED.
*ERROR* TYPE 48 ********************************************************************************
THE NUMBER OF TRACK SECTIONS PLUS TWICE THE NUMBER OF SCHEDULED STOPS
PLUS THE NUMBER OF LINE SEGMENTS THRU WHICH THE ROUTE PASSES PLUS 2 IS GREATER THAN 620
OR THE NUMBER OF STOPS ENTERED IS NEGATIVE.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 49 ********************************************************************************
THE LOCATION OF THIS SCHEDULED STOP IS NOT WITHIN THE LIMITS OF THE TRACK SECTIONS.
*ERROR* TYPE 50 ********************************************************************************
THE DWELL TIME AT A SCHEDULED STOP IS GREATER THAN 900 SECONDS.
*ERROR* TYPE 51 ********************************************************************************
THE NUMBER OF CARS IN THIS SUBWAY TRAIN IS LESS THAN 1 OR GREATER THAN 20.
*ERROR* TYPE 52 ********************************************************************************
THE LENGTH OF THIS SUBWAY TRAIN IS LESS THAN 25 OR GREATER THAN 1,500 FT.
*ERROR* TYPE 53 ********************************************************************************
THE FRONTAL AREA OF THIS SUBWAY TRAIN IS LESS THAN 25 OR GREATER THAN 300 SQ FT.
*ERROR* TYPE 54 ********************************************************************************
THE DECELERATION RATE FOR THIS TRAIN IS LESS THAN 0.5 OR GREATER THAN 5.0 MPH/SEC.
*ERROR* TYPE 55 ********************************************************************************
THE SKIN FRICTION COEFFICIENT FOR THIS TRAIN IS LESS THAN 0.0001 OR GREATER THAN 0.20.
*ERROR* TYPE 56 ********************************************************************************
THE AVERAGE EMPTY CAR WEIGHT IS LESS THAN 5 OR GREATER THAN 150 TONS.
*ERROR* TYPE 57 ********************************************************************************
THE SENSIBLE HEAT REJECTION RATE PER CAR IS LESS THAN 0 OR GREATER THAN 1,000,000 BTU/HR.
*ERROR* TYPE 58 ********************************************************************************
THE LATENT HEAT REJECTION RATE PER CAR IS LESS THAN -50,000 OR GREATER THAN 200,000 BTU/HR.
*ERROR* TYPE 59 ********************************************************************************
THIS WHEEL DIAMETER IS LESS THAN 20 OR GREATER THAN 40 IN.
*ERROR* TYPE 60 ********************************************************************************
THIS GEAR RATIO IS LESS THAN 1 TO 1 OR GREATER THAN 20 TO 1.
*ERROR* TYPE 61 ********************************************************************************
TOTAL MOTOR RESISTANCES ENTERED ARE LESS THAN 0.001 OR GREATER THAN 3.0 OHMS.
*ERROR* TYPE 62 ********************************************************************************
THESE RESISTANCE VELOCITIES ARE LESS THAN 0 MPH, GREATER THAN 100 MPH, OR NOT ENTERED IN THE PROPER ORDER.
*ERROR* TYPE 63 ********************************************************************************
THE TUNNEL WALL THICKNESS IS LESS THAN 0 OR GREATER THAN 30 FEET.
*ERROR* TYPE 64 ********************************************************************************
THE NUMBER OF GROUPS OF TRAINS ENTERED IS LESS THAN 1 OR GREATER THAN 25.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING | |
# Authors: <NAME> <<EMAIL>>
# License: Apache 2.0
from PyNomaly import loop
import logging
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
import pytest
from sklearn.datasets import load_iris
from sklearn.metrics import roc_auc_score
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_warns
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# flag to enable or disable NUMBA
NUMBA = False
# load the iris dataset
# and randomly permute it
rng = check_random_state(0)
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# fixtures
@pytest.fixture()
def X_n8() -> np.ndarray:
# Toy sample (the last two samples are outliers):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 2], [1, 2], [2, 1], [5, 3],
[-4, 2]])
return X
@pytest.fixture()
def X_n120() -> np.ndarray:
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
return X
@pytest.fixture()
def X_n140_outliers(X_n120) -> np.ndarray:
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X = np.r_[X_n120, X_outliers]
return X
@pytest.fixture()
def X_n1000() -> np.ndarray:
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(1000, 2)
return X
def test_loop(X_n8) -> None:
# Test LocalOutlierProbability:
clf = loop.LocalOutlierProbability(X_n8, n_neighbors=5, use_numba=NUMBA)
score = clf.fit().local_outlier_probabilities
share_outlier = 2. / 8.
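# Label a sample as an outlier (-1) when its LoOP score exceeds the known
# outlier share of the toy set (2 of 8 points).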
predictions = [-1 if s > share_outlier else 1 for s in score]
assert_array_equal(predictions, 6 * [1] + 2 * [-1])
# Assert smallest outlier score is greater than largest inlier score:
assert_greater(np.min(score[-2:]), np.max(score[:-2]))
# Test the DataFrame functionality
X_df = pd.DataFrame(X_n8)
# Test LocalOutlierProbability:
clf = loop.LocalOutlierProbability(X_df, n_neighbors=5, use_numba=NUMBA)
score = clf.fit().local_outlier_probabilities
share_outlier = 2. / 8.
predictions = [-1 if s > share_outlier else 1 for s in score]
assert_array_equal(predictions, 6 * [1] + 2 * [-1])
# Assert smallest outlier score is greater than largest inlier score:
assert_greater(np.min(score[-2:]), np.max(score[:-2]))
def test_loop_performance(X_n120) -> None:
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X_n120, X_outliers]
X_labels = np.r_[
np.repeat(1, X_n120.shape[0]), np.repeat(-1, X_outliers.shape[0])]
# fit the model
clf = loop.LocalOutlierProbability(
X_test,
n_neighbors=X_test.shape[0] - 1,
# test the progress bar
progress_bar=True,
use_numba=NUMBA
)
# predict scores (the lower, the more normal)
score = clf.fit().local_outlier_probabilities
share_outlier = X_outliers.shape[0] / X_test.shape[0]
X_pred = [-1 if s > share_outlier else 1 for s in score]
# check that roc_auc is good
assert_greater(roc_auc_score(X_pred, X_labels), .98)
def test_input_nodata(X_n140_outliers) -> None:
with pytest.warns(UserWarning) as record:
# attempt to fit loop without data or a distance matrix
loop.LocalOutlierProbability(n_neighbors=X_n140_outliers.shape[0] - 1,
use_numba=NUMBA)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "Data or a distance matrix must be provided."
def test_bad_input_argument(X_n140_outliers) -> None:
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a string input for n_neighbors
loop.LocalOutlierProbability(X_n140_outliers,
n_neighbors=str(
X_n140_outliers.shape[0] - 1),
use_numba=NUMBA
)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "Argument 'n_neighbors' is not of type (<class 'int'>, " \
"<class 'numpy.integer'>)."
def test_neighbor_zero(X_n120) -> None:
clf = loop.LocalOutlierProbability(X_n120, n_neighbors=0, use_numba=NUMBA)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a 0 neighbor count
clf.fit()
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "n_neighbors must be greater than 0. Fit with 10 instead."
def test_input_distonly(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a distance matrix and no neighbor matrix
loop.LocalOutlierProbability(distance_matrix=d, use_numba=NUMBA)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "A neighbor index matrix and distance matrix must both " \
"be provided when not using raw input data."
def test_input_neighboronly(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a neighbor matrix and no distance matrix
loop.LocalOutlierProbability(neighbor_matrix=idx, use_numba=NUMBA)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "Data or a distance matrix must be provided."
def test_input_too_many(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with data and a distance matrix
loop.LocalOutlierProbability(X_n120, distance_matrix=d,
neighbor_matrix=idx, use_numba=NUMBA)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "Only one of the following may be provided: data or a " \
"distance matrix (not both)."
def test_distance_neighbor_shape_mismatch(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
# generate distance and neighbor indices of a different shape
neigh_2 = NearestNeighbors(metric='euclidean')
neigh_2.fit(X_n120)
d_2, idx_2 = neigh_2.kneighbors(X_n120, n_neighbors=5, return_distance=True)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a mismatch in shapes
loop.LocalOutlierProbability(
distance_matrix=d,
neighbor_matrix=idx_2,
n_neighbors=5,
use_numba=NUMBA
)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "The shape of the distance and neighbor " \
"index matrices must match."
def test_input_neighbor_mismatch(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=5, return_distance=True)
with pytest.warns(UserWarning) as record:
# attempt to fit loop with a neighbor size mismatch
loop.LocalOutlierProbability(distance_matrix=d,
neighbor_matrix=idx,
n_neighbors=10,
use_numba=NUMBA)
# check that only one warning was raised
assert len(record) == 1
# check that the message matches
assert record[0].message.args[
0] == "The shape of the distance or " \
"neighbor index matrix does not " \
"match the number of neighbors " \
"specified."
def test_loop_dist_matrix(X_n120) -> None:
# generate distance and neighbor indices
neigh = NearestNeighbors(metric='euclidean')
neigh.fit(X_n120)
d, idx = neigh.kneighbors(X_n120, n_neighbors=10, return_distance=True)
# fit loop using data and distance matrix
clf1 = loop.LocalOutlierProbability(X_n120, use_numba=NUMBA)
clf2 = loop.LocalOutlierProbability(distance_matrix=d, neighbor_matrix=idx,
use_numba=NUMBA)
scores1 = clf1.fit().local_outlier_probabilities
scores2 = clf2.fit().local_outlier_probabilities
# compare the agreement between the results
assert_almost_equal(scores1, scores2, decimal=1)
def test_lambda_values(X_n140_outliers) -> None:
# Fit the model with different extent (lambda) values
clf1 = loop.LocalOutlierProbability(X_n140_outliers, extent=1,
use_numba=NUMBA)
clf2 = loop.LocalOutlierProbability(X_n140_outliers, extent=2,
use_numba=NUMBA)
clf3 = loop.LocalOutlierProbability(X_n140_outliers, extent=3,
use_numba=NUMBA)
# predict scores (the lower, the more normal)
score1 = clf1.fit().local_outlier_probabilities
score2 = clf2.fit().local_outlier_probabilities
score3 = clf3.fit().local_outlier_probabilities
# Get the mean of all the scores
score_mean1 = np.mean(score1)
score_mean2 = np.mean(score2)
score_mean3 = np.mean(score3)
# check that the mean scores decrease as the extent value increases
assert_greater(score_mean1, score_mean2)
assert_greater(score_mean2, score_mean3)
def test_parameters(X_n120) -> None:
# fit the model
clf = loop.LocalOutlierProbability(X_n120, use_numba=NUMBA).fit()
# check that the model has attributes post fit
assert (hasattr(clf, 'n_neighbors') and
clf.n_neighbors is not None)
assert (hasattr(clf, 'extent') and
clf.extent is not None)
assert (hasattr(clf, 'cluster_labels') and
clf._cluster_labels() is not None)
assert (hasattr(clf, 'prob_distances') and
clf.prob_distances is not None)
assert (hasattr(clf, 'prob_distances_ev') and
clf.prob_distances_ev is not None)
assert (hasattr(clf, 'norm_prob_local_outlier_factor') and
clf.norm_prob_local_outlier_factor is not None)
assert (hasattr(clf, 'local_outlier_probabilities') and
clf.local_outlier_probabilities is not None)
def test_n_neighbors() -> None:
X = iris.data
clf = loop.LocalOutlierProbability(X, n_neighbors=500,
use_numba=NUMBA).fit()
assert_equal(clf.n_neighbors, X.shape[0] - 1)
clf = loop.LocalOutlierProbability(X, n_neighbors=500, use_numba=NUMBA)
assert_warns(UserWarning, clf.fit)
assert_equal(clf.n_neighbors, X.shape[0] - 1)
def test_extent() -> None:
X = np.array([[1, 1], [1, 0]])
clf = loop.LocalOutlierProbability(X, n_neighbors=2, extent=4,
use_numba=NUMBA)
assert_warns(UserWarning, clf.fit)
def test_data_format() -> None:
X = [1.3, 1.1, 0.9, 1.4, 1.5, 3.2]
clf = loop.LocalOutlierProbability(X, n_neighbors=3, use_numba=NUMBA)
assert_warns(UserWarning, clf.fit)
def test_missing_values() -> None:
X = np.array([1.3, 1.1, 0.9, 1.4, 1.5, np.nan, 3.2])
clf = loop.LocalOutlierProbability(X, n_neighbors=3, use_numba=NUMBA)
with pytest.raises(SystemExit) as record_a, pytest.warns(
UserWarning) as record_b:
clf.fit()
assert record_a.type == SystemExit
# check that only one warning was raised
assert len(record_b) == 1
# check that the message matches
assert record_b[0].message.args[
0] == "Method does not support missing values in input data."
def test_small_cluster_size(X_n140_outliers) -> None:
# Generate cluster labels
a = [0] * 120
b = [1] * 18
cluster_labels = a + b
clf = loop.LocalOutlierProbability(
X_n140_outliers,
n_neighbors=50,
cluster_labels=cluster_labels,
use_numba=NUMBA
)
with pytest.raises(SystemExit) as | |
None,
end_tangent: "Vertex" = None,
) -> "SplineEdge":
"""Add a :class:`SplineEdge`.
Args:
fit_points: points through which the spline must go, at least 3 fit
points are required. list of (x, y)-tuples
control_points: affects the shape of the spline, mandatory and
AutoCAD crashes on invalid data. list of (x, y)-tuples
knot_values: (knot vector) mandatory and AutoCAD crashes on invalid
data. list of floats; `ezdxf` provides two tool functions to
calculate valid knot values: :func:`ezdxf.math.uniform_knot_vector`,
:func:`ezdxf.math.open_uniform_knot_vector` (default if ``None``)
weights: weight of control point, not mandatory, list of floats.
degree: degree of spline (int)
periodic: 1 for a periodic spline, 0 for a non-periodic spline
start_tangent: start_tangent as 2d vector, optional
end_tangent: end_tangent as 2d vector, optional
.. warning::
Unlike for the spline entity AutoCAD does not calculate the
necessary `knot_values` for the spline edge itself. On the contrary,
if the `knot_values` in the spline edge are missing or invalid
AutoCAD **crashes**.
"""
spline = SplineEdge()
if fit_points is not None:
spline.fit_points = Vec2.list(fit_points)
if control_points is not None:
spline.control_points = Vec2.list(control_points)
if knot_values is not None:
spline.knot_values = list(knot_values)
else:
spline.knot_values = list(
open_uniform_knot_vector(len(spline.control_points), degree + 1)
)
if weights is not None:
spline.weights = list(weights)
spline.degree = degree
spline.rational = int(bool(len(spline.weights)))
spline.periodic = int(periodic)
if start_tangent is not None:
spline.start_tangent = Vec2(start_tangent)
if end_tangent is not None:
spline.end_tangent = Vec2(end_tangent)
self.edges.append(spline)
return spline
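# Illustrative use (hypothetical edge path and points, assuming the default
# cubic degree):
#   path.add_spline(control_points=[(0, 0), (1, 2), (3, 2), (4, 0)])
# With knot_values omitted, an open uniform knot vector of length
# len(control_points) + degree + 1 is generated automatically.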
def add_spline_control_frame(
self,
fit_points: Iterable[Tuple[float, float]],
degree: int = 3,
method: str = "distance",
) -> "SplineEdge":
bspline = global_bspline_interpolation(
fit_points=fit_points, degree=degree, method=method
)
return self.add_spline(
fit_points=fit_points,
control_points=bspline.control_points,
knot_values=bspline.knots(),
)
def clear(self) -> None:
"""Delete all edges."""
self.edges = []
def export_dxf(self, tagwriter: "TagWriter", dxftype: str) -> None:
tagwriter.write_tag2(92, int(self.path_type_flags))
tagwriter.write_tag2(93, len(self.edges))
for edge in self.edges:
edge.export_dxf(tagwriter)
export_source_boundary_objects(tagwriter, self.source_boundary_objects)
class LineEdge:
EDGE_TYPE = "LineEdge" # 2021-05-31: deprecated use type
type = EdgeType.LINE
def __init__(self):
self.start = Vec2(0, 0) # OCS!
self.end = Vec2(0, 0) # OCS!
@classmethod
def load_tags(cls, tags: Tags) -> "LineEdge":
edge = cls()
for tag in tags:
code, value = tag
if code == 10:
edge.start = Vec2(value)
elif code == 11:
edge.end = Vec2(value)
return edge
def export_dxf(self, tagwriter: "TagWriter") -> None:
tagwriter.write_tag2(72, 1) # edge type
x, y, *_ = self.start
tagwriter.write_tag2(10, float(x))
tagwriter.write_tag2(20, float(y))
x, y, *_ = self.end
tagwriter.write_tag2(11, float(x))
tagwriter.write_tag2(21, float(y))
def transform(self, ocs: OCSTransform, elevation: float) -> None:
self.start = ocs.transform_2d_vertex(self.start, elevation)
self.end = ocs.transform_2d_vertex(self.end, elevation)
class ArcEdge:
type = EdgeType.ARC # 2021-05-31: deprecated use type
EDGE_TYPE = "ArcEdge"
def __init__(self):
self.center = Vec2(0.0, 0.0)
self.radius: float = 1.0
# Start- and end angles are always stored in counter-clockwise order!
self.start_angle: float = 0.0
self.end_angle: float = 360.0
# Flag to preserve the required orientation for DXF export:
self.ccw: bool = True
@classmethod
def load_tags(cls, tags: Tags) -> "ArcEdge":
edge = cls()
start = 0.0
end = 0.0
for tag in tags:
code, value = tag
if code == 10:
edge.center = Vec2(value)
elif code == 40:
edge.radius = value
elif code == 50:
start = value
elif code == 51:
end = value
elif code == 73:
edge.ccw = bool(value)
# The DXF format stores the clockwise oriented start- and end angles
# for HATCH arc- and ellipse edges as complementary angle (360-angle).
# This is a problem in many ways for processing clockwise oriented
# angles correct, especially rotation transformation won't work.
# Solution: convert clockwise angles into counter-clockwise angles
# and swap start- and end angle at loading and exporting, the ccw flag
# preserves the required orientation of the arc:
if edge.ccw:
edge.start_angle = start
edge.end_angle = end
else:
edge.start_angle = 360.0 - end
edge.end_angle = 360.0 - start
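# Illustrative example: a clockwise arc stored with group codes 50=30 and
# 51=120 is loaded as start_angle=240, end_angle=330 together with ccw=False.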
return edge
def export_dxf(self, tagwriter: "TagWriter") -> None:
tagwriter.write_tag2(72, 2) # edge type
x, y, *_ = self.center
if self.ccw:
start = self.start_angle
end = self.end_angle
else:
# swap and convert to complementary angles: see ArcEdge.load_tags()
# for explanation
start = 360.0 - self.end_angle
end = 360.0 - self.start_angle
tagwriter.write_tag2(10, float(x))
tagwriter.write_tag2(20, float(y))
tagwriter.write_tag2(40, self.radius)
tagwriter.write_tag2(50, start)
tagwriter.write_tag2(51, end)
tagwriter.write_tag2(73, int(self.ccw))
def transform(self, ocs: OCSTransform, elevation: float) -> None:
self.center = ocs.transform_2d_vertex(self.center, elevation)
self.radius = ocs.transform_length(Vec3(self.radius, 0, 0))
if not math.isclose(
arc_angle_span_deg(self.start_angle, self.end_angle), 360.0
): # open arc
# The transformation of the ccw flag is not necessary for the current
# implementation of OCS transformations. The arc angles have always
# a counter clockwise orientation around the extrusion vector and
# this orientation is preserved even for mirroring, which flips the
# extrusion vector to (0, 0, -1) for entities in the xy-plane.
self.start_angle = ocs.transform_deg_angle(self.start_angle)
self.end_angle = ocs.transform_deg_angle(self.end_angle)
else: # full circle
# Transform only start point to preserve the connection point to
# adjacent edges:
self.start_angle = ocs.transform_deg_angle(self.start_angle)
# ArcEdge is represented in counter-clockwise orientation:
self.end_angle = self.start_angle + 360.0
class EllipseEdge:
EDGE_TYPE = "EllipseEdge" # 2021-05-31: deprecated use type
type = EdgeType.ELLIPSE
def __init__(self):
self.center = Vec2((0.0, 0.0))
# Endpoint of major axis relative to center point (in OCS)
self.major_axis = Vec2((1.0, 0.0))
self.ratio: float = 1.0
# Start- and end angles are always stored in counter-clockwise order!
self.start_angle: float = 0.0 # start param, not a real angle
self.end_angle: float = 360.0 # end param, not a real angle
# Flag to preserve the required orientation for DXF export:
self.ccw: bool = True
@property
def start_param(self) -> float:
return angle_to_param(self.ratio, math.radians(self.start_angle))
@start_param.setter
def start_param(self, param: float) -> None:
self.start_angle = math.degrees(param_to_angle(self.ratio, param))
@property
def end_param(self) -> float:
return angle_to_param(self.ratio, math.radians(self.end_angle))
@end_param.setter
def end_param(self, param: float) -> None:
self.end_angle = math.degrees(param_to_angle(self.ratio, param))
@classmethod
def load_tags(cls, tags: Tags) -> "EllipseEdge":
edge = cls()
start = 0.0
end = 0.0
for tag in tags:
code, value = tag
if code == 10:
edge.center = Vec2(value)
elif code == 11:
edge.major_axis = Vec2(value)
elif code == 40:
edge.ratio = value
elif code == 50:
start = value
elif code == 51:
end = value
elif code == 73:
edge.ccw = bool(value)
if edge.ccw:
edge.start_angle = start
edge.end_angle = end
else:
# The DXF format stores the clockwise oriented start- and end angles
# for HATCH arc- and ellipse edges as complementary angle (360-angle).
# This is a problem in many ways for processing clockwise oriented
# angles correct, especially rotation transformation won't work.
# Solution: convert clockwise angles into counter-clockwise angles
# and swap start- and end angle at loading and exporting, the ccw flag
# preserves the required orientation of the ellipse:
edge.start_angle = 360.0 - end
edge.end_angle = 360.0 - start
return edge
def export_dxf(self, tagwriter: "TagWriter") -> None:
tagwriter.write_tag2(72, 3) # edge type
x, y, *_ = self.center
tagwriter.write_tag2(10, float(x))
tagwriter.write_tag2(20, float(y))
x, y, *_ = self.major_axis
tagwriter.write_tag2(11, float(x))
tagwriter.write_tag2(21, float(y))
tagwriter.write_tag2(40, self.ratio)
if self.ccw:
start = self.start_angle
end = self.end_angle
else:
# swap and convert to complementary angles: see EllipseEdge.load_tags()
# for explanation
start = 360.0 - self.end_angle
end = 360.0 - self.start_angle
tagwriter.write_tag2(50, start)
tagwriter.write_tag2(51, end)
tagwriter.write_tag2(73, int(self.ccw))
def construction_tool(self):
"""Returns ConstructionEllipse() for the OCS representation."""
return ConstructionEllipse(
center=Vec3(self.center),
major_axis=Vec3(self.major_axis),
extrusion=Vec3(0, 0, 1),
ratio=self.ratio,
# 1. ConstructionEllipse() is always in ccw orientation
# 2. Start- and end params are always stored in ccw orientation
start_param=self.start_param,
end_param=self.end_param,
)
def transform(self, ocs: OCSTransform, elevation: float) -> None:
e = self.construction_tool()
# Transform old OCS representation to WCS
ocs_to_wcs = ocs.old_ocs.to_wcs
e.center = ocs_to_wcs(e.center.replace(z=elevation))
e.major_axis = ocs_to_wcs(e.major_axis)
e.extrusion = ocs.old_extrusion
# Apply matrix transformation
e.transform(ocs.m)
# Transform WCS representation to new OCS
wcs_to_ocs = ocs.new_ocs.from_wcs
self.center = wcs_to_ocs(e.center).vec2 # type: ignore
self.major_axis = wcs_to_ocs(e.major_axis).vec2 # type: ignore
self.ratio = e.ratio
# ConstructionEllipse() is always in ccw orientation
# Start- and end params are always stored in ccw orientation
self.start_param = e.start_param
self.end_param = e.end_param
# The transformation of the ccw flag is not necessary for the current
# implementation of OCS transformations.
# An ellipse as boundary edge is an OCS entity!
# The ellipse angles | |
import datetime
import json
import logging
import os
import urllib.error
import urllib.parse
import urllib.request
import requests
from pajbot.managers.emote import EmoteManager
from pajbot.models.emote import Emote
log = logging.getLogger(__name__)
class APIBase:
def __init__(self, strict=False):
self.strict = strict
self.base_url = ""
self.headers = {}
def _get(self, url, headers={}):
try:
req = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(req, timeout=30)
except urllib.error.HTTPError as e:
# If strict is True, re-raise the HTTP error. Otherwise, return None.
if self.strict:
raise e
return None
except:
log.exception("Unhandled exception in APIBase._get")
return None
try:
return response.read().decode("utf-8")
except:
log.exception("Unhandled exception in APIBase._get while reading response")
return None
def _get_json(self, url, headers={}):
data = self._get(url, headers)
try:
if data and isinstance(data, str):
return json.loads(data)
return data
except:
log.exception("Caught exception while trying to parse json data.")
return None
def get_url(self, endpoints=[], parameters={}, base=None):
return (
(base or self.base_url)
+ "/".join(endpoints)
+ ("" if not parameters else "?" + urllib.parse.urlencode(parameters))
)
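# Example with made-up values: with a base of "https://api.twitch.tv/kraken/",
# get_url(["channels", "somestreamer", "subscriptions"], {"limit": 25, "offset": 0})
# builds "https://api.twitch.tv/kraken/channels/somestreamer/subscriptions?limit=25&offset=0".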
def getraw(self, endpoints=[], parameters={}, base=None):
return self._get(self.get_url(endpoints, parameters, base=base), self.headers)
def get(self, endpoints, parameters={}, base=None):
data = self.getraw(endpoints, parameters, base=base)
try:
if data and isinstance(data, str):
return json.loads(data)
return data
except:
log.exception("Unhandled exception in APIBase.get")
return None
def _req_with_data(self, url, data, method="POST"):
"""Send data along with the request.
Arguments:
url -- What url we should send the request to
data -- Dictionary of all data we should send along with the request
Keyword arguments:
method -- What method we should use for the request. (default: 'POST')
"""
try:
encoded_data = urllib.parse.urlencode(data).encode("utf-8")
req = urllib.request.Request(url, data=encoded_data, headers=self.headers, method=method)
return urllib.request.urlopen(req, timeout=30)
except urllib.error.HTTPError as e:
# Irregular HTTP code
if e.code in [422]:
log.error(e)
else:
try:
error_data_raw = e.fp.read().decode("utf-8")
error_data = json.loads(error_data_raw)
log.error("HTTP Error %s: %s: %s", error_data["status"], error_data["error"], error_data["message"])
except:
log.exception("Unhandled exception in exception handler")
return None
except:
log.exception("Unhandled exception caught in method `req_with_data`")
return None
def post(self, endpoints=[], parameters={}, data={}, base=None):
try:
response = self._req_with_data(self.get_url(endpoints, parameters, base=base), data, method="POST")
return response.read().decode("utf-8")
except:
log.exception("Unhandled exception caught in method `post`")
return None
def put(self, endpoints=[], parameters={}, data={}, base=None):
try:
response = self._req_with_data(self.get_url(endpoints, parameters, base=base), data, method="PUT")
return response.read().decode("utf-8")
except:
log.exception("Unhandled exception caught in method `put`")
return None
def fill_in_url_scheme(url, default_scheme="https"):
"""Fill in the scheme part of a given URL string, e.g.
with given inputs of url = "//example.com/abc" and
default_scheme="https", the output would be
"https://example.com/abc"
If the given input URL already has a scheme, the scheme is not altered.
"""
parsed_template = urllib.parse.urlparse(url, scheme=default_scheme)
return urllib.parse.urlunparse(parsed_template)
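# For instance, fill_in_url_scheme("//cdn.betterttv.net/emote/123/1x") returns
# "https://cdn.betterttv.net/emote/123/1x", while a URL that already has a scheme,
# e.g. "http://example.com/abc", is returned unchanged.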
class BTTVApi(APIBase):
def __init__(self, strict=True):
APIBase.__init__(self, strict)
self.base_url = "https://api.betterttv.net/2/"
self.headers = {}
@staticmethod
def parse_emotes(api_response_data):
url_template = api_response_data.get("urlTemplate", "//cdn.betterttv.net/emote/{{id}}/{{image}}")
url_template = fill_in_url_scheme(url_template)
def get_url(emote_hash, size):
return url_template.replace("{{id}}", emote_hash).replace("{{image}}", size + "x")
emotes = []
for emote in api_response_data["emotes"]:
emote_hash = emote["id"]
emotes.append(
Emote(
code=emote["code"],
provider="bttv",
id=emote_hash,
urls={"1": get_url(emote_hash, "1"), "2": get_url(emote_hash, "2"), "4": get_url(emote_hash, "3")},
)
)
return emotes
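# For example (made-up emote id): with the default template, emote hash "abc123"
# and size "1" expand to "https://cdn.betterttv.net/emote/abc123/1x".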
def get_global_emotes(self):
"""Returns a list of global BTTV emotes in the standard Emote format."""
try:
data = self.get(["emotes"])
return self.parse_emotes(data)
except urllib.error.HTTPError as e:
if e.code == 502:
log.warning("Bad Gateway when getting global emotes.")
elif e.code == 503:
log.warning("Service Unavailable when getting global emotes.")
else:
log.exception("Unhandled HTTP error code")
except KeyError:
log.exception("Caught exception while trying to get global BTTV emotes")
except:
log.exception("Uncaught exception in BTTVApi.get_global_emotes")
return []
def get_channel_emotes(self, channel):
"""Returns a list of channel-specific BTTV emotes in the standard Emote format."""
try:
data = self.get(["channels", channel])
return self.parse_emotes(data)
except urllib.error.HTTPError as e:
if e.code == 502:
log.warning("Bad Gateway when getting channel emotes.")
elif e.code == 503:
log.warning("Service Unavailable when getting channel emotes.")
elif e.code == 404:
log.info("There are no BTTV Emotes for this channel.")
else:
log.exception("Unhandled HTTP error code")
except KeyError:
log.exception("Caught exception while trying to get channel-specific BTTV emotes")
except:
log.exception("Uncaught exception in BTTVApi.get_channel_emotes")
return []
class FFZApi(APIBase):
def __init__(self, strict=True):
APIBase.__init__(self, strict)
self.base_url = "https://api.frankerfacez.com/v1/"
self.headers = {}
@staticmethod
def parse_sets(emote_sets):
emotes = []
for emote_set in emote_sets.values():
for emote in emote_set["emoticons"]:
# FFZ returns relative URLs (e.g. //cdn.frankerfacez.com/...)
# so we fill in the scheme if it's missing :)
urls = {size: fill_in_url_scheme(url) for size, url in emote["urls"].items()}
emotes.append(Emote(code=emote["name"], provider="ffz", id=emote["id"], urls=urls))
return emotes
def get_global_emotes(self):
"""Returns a list of global FFZ emotes in the standard Emote format."""
try:
data = self.get(["set", "global"])
# FFZ returns a number of global sets but only a subset of them should be available
# in all channels, those are available under "default_sets", e.g. a list of set IDs like this:
# [ 3, 6, 7, 14342 ]
global_set_ids = data["default_sets"]
global_sets = {str(set_id): data["sets"][str(set_id)] for set_id in global_set_ids}
return self.parse_sets(global_sets)
except urllib.error.HTTPError as e:
if e.code == 502:
log.warning("Bad Gateway when getting global emotes.")
elif e.code == 503:
log.warning("Service Unavailable when getting global emotes.")
else:
log.exception("Unhandled HTTP error code")
except KeyError:
log.exception("Caught exception while trying to get global FFZ emotes")
except:
log.exception("Uncaught exception in FFZApi.get_global_emotes")
# error
return []
def get_channel_emotes(self, channel):
"""Returns a list of channel-specific FFZ emotes in the standard Emote format."""
try:
data = self.get(["room", channel])
return self.parse_sets(data["sets"])
except urllib.error.HTTPError as e:
if e.code == 502:
log.warning("Bad Gateway when getting channel emotes.")
elif e.code == 503:
log.warning("Service Unavailable when getting channel emotes.")
elif e.code == 404:
log.info("There are no FFZ Emotes for this channel.")
else:
log.exception("Unhandled HTTP error code")
except KeyError:
log.exception("Caught exception while trying to get channel-specific FFZ emotes")
except:
log.exception("Uncaught exception in FFZApi.get_channel_emotes")
return []
class TwitchAPI(APIBase):
def __init__(self, client_id=None, oauth=None, strict=True):
"""
Keyword arguments:
client_id -- twitch api client_id
oauth -- twitch api oauth
strict -- Whether the APIBase object should be strict in its errors or not. (default: True)
"""
APIBase.__init__(self, strict)
self.base_url = "https://api.twitch.tv/api/"
self.kraken_url = "{}/kraken/".format(os.environ.get("APIPROXY_HOST", "http://127.0.0.1:7221"))
self.tmi_url = "https://tmi.twitch.tv/"
self.headers = {"Accept": "application/vnd.twitchtv.v3+json"}
if client_id:
self.headers["Client-ID"] = client_id
if oauth:
self.headers["Authorization"] = "OAuth " + oauth
@staticmethod
def parse_datetime(datetime_str):
"""Parses date strings in the format of 2015-09-11T23:01:11+00:00
to a naive datetime object."""
return datetime.datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%SZ")
def get_subscribers(self, streamer, limit=25, offset=0, attempt=0):
"""Returns a list of subscribers within the limit+offset range.
Arguments:
streamer -- the streamer whose subscriber we want to fetch.
Keyword arguments:
limit -- Maximum number of subscribers fetched. (default: 25)
offset -- Offset for pagination. (default: 0)
"""
if attempt > 2:
return False, False, True
try:
data = self.get(
["channels", streamer, "subscriptions"], {"limit": limit, "offset": offset}, base=self.kraken_url
)
if data:
return [u["user"]["name"] for u in data["subscriptions"]], False, False
except urllib.error.HTTPError as e:
# Non-standard HTTP Code returned.
log.warning("Non-standard HTTP Code returned while fetching subscribers: %s", e.code)
log.info(e)
log.info(e.fp.read())
except:
log.exception("Unhandled exception caught in TwitchAPI.get_subscribers")
return [], attempt + 1, False
def get_chatters(self, streamer):
"""Returns a list of chatters in the stream."""
chatters = []
try:
data = self.get(["group", "user", streamer, "chatters"], base=self.tmi_url)
ch = data["chatters"]
chatters = ch["moderators"] + ch["staff"] + ch["admins"] + ch["global_mods"] + ch["viewers"]
except urllib.error.HTTPError as e:
if e.code == 502:
log.warning("Bad Gateway when getting chatters.")
elif e.code == 503:
log.warning("Service Unavailable when getting chatters.")
else:
log.exception("Unhandled HTTP error code")
except KeyError:
log.exception("Caught exception while trying to get chatters for streamer %s", streamer)
except:
log.exception("Uncaught exception in TwitchAPI.get_chatters")
return chatters
def get_status(self, streamer):
"""Returns information about a user or stream on twitch.
This method will _ALWAYS_ return a dictionary with a bunch of data.
Check if the key 'error' is set to False to know there's some valid data in there.
The key 'exists' is set to False if the user does not exist, True if the user exists and None if we don't know.
"""
stream_status = {
"error": True,
"exists": None,
"online": False,
"viewers": -1,
"game": None,
"title": None,
"created_at": None,
"followers": -1,
"views": -1,
"broadcast_id": None,
}
data = None
try:
data = self.get(["streams", streamer], base=self.kraken_url)
stream_status["error"] = False
stream_status["online"] = "stream" in data and data["stream"] is not None
if stream_status["online"]:
stream_status["viewers"] = data["stream"]["viewers"]
stream_status["game"] = data["stream"]["game"]
stream_status["title"] = data["stream"]["channel"]["status"]
stream_status["created_at"] = data["stream"]["created_at"]
stream_status["followers"] = data["stream"]["channel"]["followers"]
stream_status["views"] = data["stream"]["channel"]["views"]
stream_status["broadcast_id"] = data["stream"]["_id"]
except urllib.error.HTTPError as e:
if e.code == 404:
stream_status["exists"] = False
data = json.loads(e.read().decode("utf-8"))
elif e.code == 502:
log.warning("Bad Gateway when | |
mape = (resdf.loc['error']/resdf.loc[datavar]).mean()
r2 = (resdf.loc[simvar].corr(resdf.loc[datavar])) ** 2
return maeom, mape, r2
def trunc_log(df):
"""Return log10 of a dataframe, ignoring negative base values"""
df[df <= 0] = np.NaN
return np.log10(df)
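# For instance (illustrative values), trunc_log(pd.Series([100.0, 0.0, -5.0]))
# yields [2.0, NaN, NaN]: non-positive entries are masked in place before taking
# log10, so the day-by-day regressions can simply drop them via missing='drop'.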
def process_results(scriptname, eqtime, earlytime, gof_vars, iqr_list=[],
means_list=[], perc_list=[0.05,0.95], delta=None, duration=None):
"""Read single-country calibration results and calculate additional
outputs, incl. percentiles and IQRs, returning a compiled pd.Series
of processed country results `c_res` and a Dataframe with country
time series outputs (deaths & infs) `datdf`"""
# Read country parameter values from .out file
outlist = read_outvals(f'{scriptname}.out')
varnames = [n.split('[')[0] for n in [var[0] for var in outlist]]
vals = [var[1] for var in outlist]
c_res = pd.Series(vals, index=varnames)
# Read full country calibration results, extract death & inf data for output
resdf = pd.read_csv(f'{scriptname}.tab', sep='\t', index_col=0, error_bad_lines=False)
resdf.index = [n.split('[')[0] for n in resdf.index] # Separate subscripts
datdf = resdf.loc[['DeathsOverTimeRaw', 'eqDeath', 'DataFlowOverTime', 'inf exp']]
# Pull end-of-run values from full country results
endtime = len(resdf.columns) - 1
c_res['cum_dpm'] = resdf.loc['CumulativeDpm'][-1]
c_res['cum_dpm_del'] = resdf.loc['CumulativeDpm'][-(1 + delta)]
c_res['IFR'] = resdf.loc['IFR'][0]
c_res['SFrac_mdl'] = resdf.loc['SFrac'][-1]
c_res['end_dpm_mdl'] = resdf.loc['eqDeath'][-1]
c_res['end_alpha_mdl'] = resdf.loc['alpha'][-1]
c_res['end_gdn_mdl'] = resdf.loc['g death'][-1]
c_res['chg_dpm_mdl'] = (c_res['end_dpm_mdl'] - resdf.loc['eqDeath'][-2])/c_res['end_dpm_mdl']
# Calculate mean Re and GOF statistics
for var in means_list:
c_res[f"avg_{var}"] = calc_mean(resdf, var, limit=hist_window)
c_res['maeom'], c_res['mape'], c_res['r2'] = calc_gof(resdf, gof_vars[0], gof_vars[1])
# Calculate various projections based on analytical approximation
calc_eq_vals(c_res, eqtime, duration=duration) # for projected eqtime
calc_end_vals(c_res, c_res['cum_dpm'], c_res['IFR'],
endtime, delta, c_res['cum_dpm_del'], duration) # for end of run
calc_eq_vals(c_res, earlytime, colnames=['ear_gdn', 'ear_alpha', 'ear_dpm'],
duration=duration) # for estimate of early responsiveness
# Calculate IQR and percentile values to append to country results
iqrs, percs = generate_intervals(scriptname, c_res['cum_dpm'], c_res['IFR'], eqtime, endtime,
iqr_list, perc_list, delta, c_res['cum_dpm_del'], duration)
c_res = pd.concat([c_res, iqrs, *percs])
return c_res, datdf
def regress_deaths(dthdf):
"""Read in death and expected equilibrium death data, and regress
for all countries day by day, recording regression coefficients"""
regdf = pd.DataFrame(index=dthdf.columns, columns=['n_R', 'RLM'])
for i in dthdf.columns:
# Correct for negative values and take log10
Y_log = trunc_log(dthdf.loc['dpm'][i])
X_log = trunc_log(dthdf.loc['eqDeath'][i])
# If insufficient datapoints for date, skip and record NaN
if Y_log.count() < 3:
regdf.loc[i] = np.NaN
# Otherwise run robust linear regression
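# Note: no intercept column is added to X_log, so this fits a robust regression
# through the origin; params[0] is the slope of log10(observed dpm) on
# log10(expected equilibrium deaths).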
else:
mod_RLM = sm.RLM(Y_log, X_log, missing='drop')
fit_RLM = mod_RLM.fit()
# Record observations and coefficient
regdf.loc[i] = [fit_RLM.nobs, fit_RLM.params[0]]
regdf.to_csv(f'./{baserunname}_regression.tab', sep='\t')
return regdf
def compile_senslist(sens_vars, vals_dict, multipliers):
"""Compile setvals list for use with MultiScript for sensitivity
analysis, based on specified `multipliers` and parameters to test
as listed in `sens_vars`"""
def lookup_dict(vars_list, vals_list):
return [type(sub)(vals_list[var] for var in sub) for sub in vars_list]
def lookup_mult(vars_list, mult):
return [type(sub)(var * mult for var in sub) for sub in vars_list]
# Pull corresponding values for sensitivity parameters
base_vals = lookup_dict(sens_vars, vals_dict)
# Generate suffix strings for runnames
sfxs = [str(mult).replace('.','') for mult in multipliers]
# Calculate setval values for sensitivity parameters
mult_list = [lookup_mult(base_vals, mult) for mult in multipliers]
# Compile & return list of setval tuples
sens_list = [[(varnames, mults[i], sfxs[j]) for j, mults in enumerate(mult_list)]
for i, varnames in enumerate(sens_vars)]
return sens_list
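# Illustrative sketch of compile_senslist() output (hypothetical parameter names
# and values; this helper is not called anywhere in the pipeline):
def _compile_senslist_example():
    sens_vars = [('alpha',), ('beta', 'gamma')]
    vals_dict = {'alpha': 1.0, 'beta': 2.0, 'gamma': 4.0}
    expected = [
        [(('alpha',), (0.5,), '05'), (('alpha',), (2.0,), '2')],
        [(('beta', 'gamma'), (1.0, 2.0), '05'), (('beta', 'gamma'), (4.0, 8.0), '2')],
    ]
    return compile_senslist(sens_vars, vals_dict, [0.5, 2]) == expected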
# In[ ]:
controlfilename = input("Enter control file name (with extension):")
cf = json.load(open(controlfilename, 'r'))
# Unpack controlfile into variables
for k,v in cf.items():
exec(k + '=v')
for setting in [datasettings, analysissettings]:
for k, v in setting.items():
exec(k + '=v')
# In[ ]:
# Set up files in run directory and initialise logfile
master = Script(cf)
master.changes.extend(scenariolist)
master.copy_model_files(f"{baserunname}_IterCal")
for f in [f"../{controlfilename}", "../ImportData.cmd", "../CovRegInput.frm"]:
copy(f, "./")
logfile = f"{os.getcwd()}/{baserunname}.log"
write_log(f"-----\nStarting new log at {time.ctime()}\nReady to work!", logfile)
# In[ ]:
##### THIS CELL IS FOR UPDATING DATA ONLY #####
# Read main and mobility data from URL for raw data CSVs
data = pd.read_csv(data_url)
mobdata = pd.read_csv(mobdata_url)
# Extract dictionary mapping of ISO codes to OWID country names
names = data.filter(['iso_code','location'], axis=1).drop_duplicates()
names.replace({'iso_code': renames}, inplace=True) # Rename unusual ISO codes as needed
c_dict = dict(zip(names['location'], names['iso_code']))
# Subset CSV to relevant data fields
data = data.filter(['iso_code','date', 'total_cases', 'new_cases_smoothed',
'new_deaths_smoothed_per_million', 'population', 'gdp_per_capita'], axis=1)
# Rename fields as needed
data.columns = ['iso_code','date', 'total_cases', 'new_cases',
'new_dpm', 'population', 'gdp_per_capita']
table = pd.pivot_table(data, values=['total_cases', 'new_cases', 'new_dpm', 'population',
'gdp_per_capita'], index='date', columns='iso_code')
table = table.T
table.index.names = ['field', 'iso_code']
table.columns = pd.to_datetime(table.columns)
# Drop countries with fewer cases than specified threshold, insufficient datapoints, or zero deaths
dropidx_cases = table.loc['total_cases'].index[table.loc['total_cases'].max(axis=1) < min_cases]
dropidx_deaths = table.loc['new_dpm'].index[table.loc['new_dpm'].max(axis=1) == 0]
first_idxs = (table.loc['total_cases'] > start_cases).idxmax(axis=1)
dropidx_data = table.loc['total_cases'].index[
(table.columns[-1] - first_idxs).dt.days < min_datapoints]
print(dropidx_cases, dropidx_deaths, dropidx_data)
table.drop(dropidx_cases, level='iso_code', inplace=True, errors='ignore')
table.drop(dropidx_deaths, level='iso_code', inplace=True, errors='ignore')
table.drop(dropidx_data, level='iso_code', inplace=True, errors='ignore')
table.drop(droplist, level='iso_code', inplace=True, errors='ignore')
table = table.rename(index=renames) # Rename any unusual ISO codes as needed
# Separate country statistics columns for later use, then temporarily remove
popn = table.loc['population'].mean(axis=1)
gdppc = table.loc['gdp_per_capita'].mean(axis=1)
table.drop(['population', 'gdp_per_capita'], level='field', inplace=True, errors='ignore')
# Convert column indices to day number since startdate
table.columns = (table.columns - pd.to_datetime('2019-12-31')).days
# Reorder multiindex levels before by-country subsetting
table = table.reorder_levels(['iso_code', 'field']).sort_index()
# Identify first date over infection threshold for each country and subset dataframe accordingly
for i in table.index.levels[0]:
first_idx = get_first_idx(table.loc[i].loc['total_cases'], start_cases)
table.loc[i].loc[:, :first_idx] = np.NaN
# Clean infinite values and switch multiindex levels back
table.replace([np.inf, -np.inf], np.NaN, inplace=True)
table = table.reorder_levels(['field', 'iso_code']).sort_index()
# Calculate aggregate dpm data for later use
mean_dpm = table.loc['new_dpm'].iloc[:, -hist_window:].mean(axis=1) # Mean over last `hist_window` days per country
# Extract mobility change data to pivot table
mobdata['average'] = pd.concat([mobdata['retail_and_recreation'], mobdata['workplaces']],
axis=1).mean(axis=1) # Get average of R&R and workplace values
mobdata.replace({'Country': c_dict}, inplace=True) # Convert country names to ISO codes
mobtable = pd.pivot_table(mobdata, values=['retail_and_recreation', 'workplaces', 'average'],
index='Year', columns='Country')
mobtable = mobtable.T
# Calculate averages over last `hist_window` days & recompile into new dataframe
mobtable = mobtable[mobtable.columns[-hist_window:]]
tbm = mobtable.mean(axis=1)
mobmean = pd.concat([tbm.loc['average'], tbm.loc['retail_and_recreation'], tbm.loc['workplaces']],
keys=['mob_avg', 'mob_rr', 'mob_wk'], axis=1)
display(mobmean)
# Export processed dataframes to .tab and import to VDF, or read in existing .tab
display(table)
if updatedata != 0:
table.to_csv('./InputData.tab', sep='\t')
subprocess.run(f"{vensim7path} \"./ImportData.cmd\"", check=True)
mobmean.to_csv('./MobilityData.tab', sep='\t')
else:
table = pd.read_csv(f'./InputData.tab', sep='\t', index_col=[0,1])
mobmean = pd.read_csv('./MobilityData.tab', sep='\t')
# Update FinalTime cin with last day of available data - IMPORTANT! USES FIRST FILE IN CHANGES LIST
finaltime = len(table.columns)-1
with open(simsettings['changes'][0], 'w') as f:
f.write(f"FINAL TIME = {finaltime}")
# In[ ]:
##### MAIN ANALYSIS, DURATION SENSITIVITY & RESULTS-PROCESSING CODE #####
# Pull country list from data table
countrylist = list(table.index.levels[1])
print(countrylist)
basename = cf['baserunname']
# Loop through disease duration values to test, starting with main then sensitivity values
for i in ([main_dur] + sens_durs):
cf['baserunname'] = f'{basename}{i}'
baserunname = cf['baserunname']
print(baserunname)
# Create script object for given duration, to cleanly create calibration subfolder
sub = Script(cf)
sub.changes.extend(scenariolist)
sub.copy_model_files(baserunname)
copy(f"../{controlfilename}", "./")
# Overwrite disease duration cin file - IMPORTANT! USES LAST FILE IN CHANGES LIST
with open(simsettings['changes'][-1], 'w') as f:
f.write(f"DiseaseDuration = {i}")
dur = i # Assign disease duration variable
# Initialise necessary .mdl and .voc files
create_mdls(cf, countrylist, finaltime, logfile)
# Run country-by-country calibration process, unless otherwise specified (mccores=0)
if mccores != 0:
write_log(f"Initialising MCMC with duration {dur}!", logfile)
c_list = []
err_list = []
for c in countrylist:
# First run Powell optimization, then MCMC
res_i = compile_script(cf, CtyScript, c, 'i', {'model': f'_{c}', 'optparm': '_c'},
logfile, subdir=c)
if res_i != False:
res = compile_script(cf, CtyMCScript, c, 'MC', {'model': f'_{c}', 'optparm': '_cmc'},
logfile, chglist=[(c, 'i')], subdir=c)
if res != False:
c_list.append(c) # Compile updated c_list of successful calibrations
else:
err_list.append(c) # Compile error list of failed calibrations
else:
err_list.append(c) # Compile error list of failed calibrations
write_log(f"Calibration complete! Error list is:\n{err_list}", logfile)
# If calibration not needed, default to using country list from data as c_list
else:
write_log("Hang on to outdated imperialist dogma! Using previous output...", logfile)
c_list = countrylist
err_list = []
write_log("Processing results!", logfile)
# Initialise containers for processed country results and death data
res_list = []
dat_list = []
# Loop through country MCMC outputs, calling master results processing function on each
for c in c_list:
try:
c_res, datdf = process_results(f'./{c}/{baserunname}_{c}_MC', eqtime, earlytime,
gof_vars, iqr_list, means_list, perc_list, delta, dur)
res_list.append(c_res)
dat_list.append(datdf)
except FileNotFoundError:
err_list.append(c)
# Compile main results dataframe with processed country results
results = pd.concat(res_list, axis=1)
# Compile country infection and death outputs over time
dpm_data, eq_death, inf_data, inf_exp = [
pd.concat([df.loc[var] for df in dat_list], axis=1) for var in [
'DeathsOverTimeRaw', 'eqDeath', 'DataFlowOverTime', 'inf exp']]
tx_account = [owner_account, attacker_account]
else:
raise EthereumError('The account to perform the symbolic exploration of the contract should be "attacker", "owner" or "combo1"')
if contract_account is None:
logger.info("Failed to create contract. Exception in constructor")
self.finalize()
return
prev_coverage = 0
current_coverage = 0
tx_no = 0
while (current_coverage < 100 or not tx_use_coverage) and not self.is_shutdown():
try:
logger.info("Starting symbolic transaction: %d", tx_no)
# run_symbolic_tx
symbolic_data = self.make_symbolic_buffer(320)
symbolic_value = self.make_symbolic_value()
self.transaction(caller=tx_account[min(tx_no, len(tx_account) - 1)],
address=contract_account,
data=symbolic_data,
value=symbolic_value)
logger.info("%d alive states, %d terminated states", self.count_running_states(), self.count_terminated_states())
except NoAliveStates:
break
# Check if the maximum number of tx was reached
if tx_limit is not None and tx_no + 1 == tx_limit:
break
# Check if coverage has improved or not
if tx_use_coverage:
prev_coverage = current_coverage
current_coverage = self.global_coverage(contract_account)
found_new_coverage = prev_coverage < current_coverage
if not found_new_coverage:
break
tx_no += 1
def run(self, **kwargs):
''' Run any pending transaction on any running state '''
# Check if there is a pending transaction
with self.locked_context('seth') as context:
# there are no states added to the executor queue
assert len(self._executor.list()) == 0
for state_id in context['_saved_states']:
self._executor.put(state_id)
context['_saved_states'] = set()
# A callback will use _pending_transaction and issue the transaction
# in each state (see load_state_callback)
super(ManticoreEVM, self).run(**kwargs)
with self.locked_context('seth') as context:
if len(context['_saved_states']) == 1:
self._initial_state = self._executor._workspace.load_state(context['_saved_states'].pop(), delete=True)
context['_saved_states'] = set()
assert self._running_state_ids == (-1,)
def save(self, state, state_id=None, final=False):
''' Save a state in secondary storage and add it to running or final lists
:param state: A manticore State
:param state_id: if not None force state_id (overwrite)
:param final: True if state is final
:returns: a state id
'''
# If overwriting then the state_id must be known
if state_id is not None:
if state_id not in self._all_state_ids:
raise EthereumError("Trying to overwrite unknown state_id")
with self.locked_context('seth') as context:
context['_final_states'].discard(state_id)
context['_saved_states'].discard(state_id)
if state_id != -1:
# save the state to secondary storage
state_id = self._executor._workspace.save_state(state, state_id=state_id)
with self.locked_context('seth') as context:
if final:
# Keep it on a private list
context['_final_states'].add(state_id)
else:
# Keep it on a private list
context['_saved_states'].add(state_id)
return state_id
def load(self, state_id=None):
''' Load one of the running or final states.
:param state_id: If None it assumes there is a single running state
:type state_id: int or None
'''
state = None
if state_id is None:
#a single state was assumed
if self.count_running_states() == 1:
#Get the ID of the single running state
state_id = self._running_state_ids[0]
else:
raise EthereumError("More than one state running, you must specify state id.")
if state_id == -1:
state = self.initial_state
else:
state = self._executor._workspace.load_state(state_id, delete=False)
# Forward events from the newly loaded object
self._executor.forward_events_from(state, True)
return state
# Callbacks
def _symbolic_sha3(self, state, data, known_hashes):
''' INTERNAL USE '''
with self.locked_context('known_sha3', set) as known_sha3:
state.platform._sha3.update(known_sha3)
def _concrete_sha3(self, state, buf, value):
''' INTERNAL USE '''
with self.locked_context('known_sha3', set) as known_sha3:
known_sha3.add((str(buf), value))
def _terminate_state_callback(self, state, state_id, e):
''' INTERNAL USE
Every time a state finishes executing last transaction we save it in
our private list
'''
if str(e) == 'Abandoned state':
#do nothing
return
world = state.platform
state.context['last_exception'] = e
e.testcase = False # Do not generate a testcase file
if not world.all_transactions:
logger.debug("Something was wrong. Search terminated in the middle of an ongoing tx")
self.save(state, final=True)
return
tx = world.all_transactions[-1]
# If we initiated the tx we need to process the outcome for now.
# FIXME: incomplete.
if tx.is_human():
if tx.sort == 'CREATE':
if tx.result == 'RETURN':
world.set_code(tx.address, tx.return_data)
else:
world.delete_account(tx.address)
else:
logger.info("Manticore exception. State should be terminated only at the end of the human transaction")
# A human tx that ends in this state won't modify the storage, so finalize
# and generate a testcase. FIXME: this should be configurable, as REVERT and
# THROW actually change the balance and nonce(?) of some accounts.
if tx.result in {'REVERT', 'THROW', 'TXERROR'}:
self.save(state, final=True)
else:
assert tx.result in {'SELFDESTRUCT', 'RETURN', 'STOP'}
# if not a revert we save the state for further transactioning
self.save(state) # Add to running states
#Callbacks
def _load_state_callback(self, state, state_id):
''' INTERNAL USE
When a state has just been loaded from storage we issue the pending transaction
'''
if '_pending_transaction' not in state.context:
return
world = state.platform
ty, caller, address, value, data, price = state.context['_pending_transaction']
del state.context['_pending_transaction']
if ty == 'CALL':
world.transaction(address=address, caller=caller, data=data, value=value, price=price)
else:
assert ty == 'CREATE'
world.create_contract(caller=caller, address=address, balance=value, init=data, price=price)
def _did_evm_execute_instruction_callback(self, state, instruction, arguments, result_ref):
''' INTERNAL USE '''
logger.debug("%s", state.platform.current_vm)
#TODO move to a plugin
at_init = state.platform.current_transaction.sort == 'CREATE'
if at_init:
coverage_context_name = 'init_coverage'
else:
coverage_context_name = 'runtime_coverage'
with self.locked_context(coverage_context_name, set) as coverage:
coverage.add((state.platform.current_vm.address, instruction.pc))
state.context.setdefault('evm.trace', []).append((state.platform.current_vm.address, instruction.pc, at_init))
def _did_evm_read_code(self, state, offset, size):
''' INTERNAL USE '''
with self.locked_context('code_data', set) as code_data:
for i in range(offset, offset + size):
code_data.add((state.platform.current_vm.address, i))
def get_metadata(self, address):
''' Gets the solidity metadata for address.
This is available only if address is a contract created from solidity
'''
return self.metadata.get(int(address))
def register_detector(self, d):
if not isinstance(d, Detector):
raise EthereumError("Not a Detector")
if d.name in self.detectors:
raise EthereumError("Detector already registered")
self.detectors[d.name] = d
self.register_plugin(d)
return d.name
def unregister_detector(self, d):
if not isinstance(d, (Detector, str)):
raise EthereumError("Not a Detector")
name = d
if isinstance(d, Detector):
name = d.name
if name not in self.detectors:
raise EthereumError("Detector not registered")
d = self.detectors[name]
del self.detectors[name]
self.unregister_plugin(d)
@property
def workspace(self):
return self._executor._workspace._store.uri
def generate_testcase(self, state, name, message=''):
self._generate_testcase_callback(state, name, message)
def _generate_testcase_callback(self, state, name, message=''):
'''
Create a serialized description of a given state.
:param state: The state to generate information about
:param message: Accompanying message
'''
# workspace should not be responsible for formatting the output
# each object knows its secrets, each class should be able to report its
# final state
#super(ManticoreEVM, self)._generate_testcase_callback(state, name, message)
# TODO(mark): Refactor ManticoreOutput to let the platform be more in control
# so this function can be fully ported to EVMWorld.generate_workspace_files.
blockchain = state.platform
def flagged(flag):
return '(*)' if flag else ''
testcase = self._output.testcase(name.replace(' ', '_'))
last_tx = blockchain.last_transaction
if last_tx:
message = message + last_tx.result
logger.info("Generated testcase No. {} - {}".format(testcase.num, message))
local_findings = set()
for detector in self.detectors.values():
for address, pc, finding, at_init in detector.get_findings(state):
if (address, pc, finding, at_init) not in local_findings:
local_findings.add((address, pc, finding, at_init))
if len(local_findings):
with testcase.open_stream('findings') as findings:
for address, pc, finding, at_init in local_findings:
findings.write('- %s -\n' % finding)
findings.write(' Contract: 0x%x\n' % address)
findings.write(' EVM Program counter: %s%s\n' % (pc, at_init and " (at constructor)" or ""))
md = self.get_metadata(address)
if md is not None:
src = md.get_source_for(pc, runtime=not at_init)
findings.write(' Snippet:\n')
findings.write(src.replace('\n', '\n ').strip())
findings.write('\n')
with testcase.open_stream('summary') as summary:
summary.write("Message: %s\n" % message)
summary.write("Last exception: %s\n" % state.context.get('last_exception', 'None'))
if last_tx:
at_runtime = last_tx.sort != 'CREATE'
address, offset, at_init = state.context['evm.trace'][-1]
assert at_runtime != at_init
# Last instruction if the last tx was valid
if state.context['last_exception'].message != 'TXERROR':
metadata = self.get_metadata(blockchain.last_transaction.address)
if metadata is not None:
summary.write('Last instruction at contract %x offset %x\n' % (address, offset))
source_code_snippet = metadata.get_source_for(offset, at_runtime)
if source_code_snippet:
summary.write(source_code_snippet)
summary.write('\n')
# Accounts summary
is_something_symbolic = False
summary.write("%d accounts.\n" % len(blockchain.accounts))
for account_address in blockchain.accounts:
is_account_address_symbolic = issymbolic(account_address)
account_address = state.solve_one(account_address)
summary.write("* %s::\n" % self.account_name(account_address))
summary.write("Address: 0x%x %s\n" % (account_address, flagged(is_account_address_symbolic)))
balance = blockchain.get_balance(account_address)
is_balance_symbolic = issymbolic(balance)
is_something_symbolic = is_something_symbolic or is_balance_symbolic
balance = state.solve_one(balance)
summary.write("Balance: %d %s\n" % (balance, flagged(is_balance_symbolic)))
from .core.smtlib.visitors import translate_to_smtlib
storage = blockchain.get_storage(account_address)
summary.write("Storage: %s\n" % translate_to_smtlib(storage, use_bindings=True))
all_used_indexes = []
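# Enumerate the symbolic storage: repeatedly ask the solver for an index whose
# slot is non-zero, exclude it, and ask again; the bare except ends the loop
# once no further model exists, leaving every concrete index that may hold data.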
with state.constraints as temp_cs:
index = temp_cs.new_bitvec(256)
storage = blockchain.get_storage(account_address)
temp_cs.add(storage.get(index) != 0)
try:
while True:
a_index = solver.get_value(temp_cs, index)
all_used_indexes.append(a_index)
temp_cs.add(storage.get(a_index) != 0)
temp_cs.add(index != a_index)
except:
pass
if all_used_indexes:
summary.write("Storage:\n")
for i in all_used_indexes:
value = storage.get(i)
is_storage_symbolic = issymbolic(value)
summary.write("storage[%x] = %x %s\n" % (state.solve_one(i), state.solve_one(value), flagged(is_storage_symbolic)))
'''if blockchain.has_storage(account_address):
summary.write("Storage:\n")
for offset, value in blockchain.get_storage_items(account_address):
is_storage_symbolic = issymbolic(offset) or issymbolic(value)
offset = state.solve_one(offset)
value = state.solve_one(value)
summary.write("\t%032x -> %032x %s\n" % (offset, value, flagged(is_storage_symbolic)))
is_something_symbolic = is_something_symbolic or is_storage_symbolic
'''
runtime_code = state.solve_one(blockchain.get_code(account_address))
if runtime_code:
summary.write("Code:\n")
# kolejka/observer/server.py
# vim:ts=4:sts=4:sw=4:expandtab
import cgi
import datetime
import hashlib
import http.server
import json
import logging
from multiprocessing import Process
import os
import re
import signal
import socket
import socketserver
import time
import traceback
from urllib.parse import urlparse, urlencode, parse_qsl
import uuid
from kolejka.common.settings import OBSERVER_CGROUPS, OBSERVER_PID_FILE, OBSERVER_SERVERSTRING
from kolejka.common import HTTPUnixServer, HTTPUnixConnection
from kolejka.common import KolejkaLimits, KolejkaStats
from kolejka.common import ControlGroupSystem
#TODO: detect subsessions?
class Session:
@property
def system(self):
return self.registry.control_group_system
@property
def group_name(self):
return 'kolejka_observer_' + self.id
def group_path(self, group, filename=''):
assert group in self.groups
return os.path.join(self.system.mount_point(group), self.groups[group].strip('/'), filename.strip('/')).rstrip('/')
def parent_group_path(self, group, filename=''):
assert group in self.groups
return os.path.join(self.system.mount_point(group), self.parent_groups[group].strip('/'), filename.strip('/')).rstrip('/')
def list_group(self, group):
result = set()
path = self.group_path(group)
for d, _, _ in os.walk(path):
group_list_path = os.path.join(path, d, 'cgroup.procs')
if os.path.exists(group_list_path):
with open(group_list_path) as group_list_file:
result.update([line.strip() for line in group_list_file.readlines()])
return sorted(list(result))
def cpuset_cpus(self, path):
path=os.path.abspath(path)
while path.startswith(self.system.mount_point('cpuset')):
cpuset_path = os.path.join(path, 'cpuset.cpus')
if os.path.exists(cpuset_path):
with open(cpuset_path) as cpuset_file:
cpus = cpuset_file.readline().strip()
if cpus != '':
cpuset = self.system.parse_cpuset(cpus)
return sorted(list(cpuset))
path = os.path.dirname(path)
def limited_cpus(self):
return self.cpuset_cpus(self.group_path('cpuset'))
def available_cpus(self):
return self.cpuset_cpus(os.path.dirname(self.group_path('cpuset')))
def pid_start_time(self, pid):
try:
stat_path = os.path.join('/proc', str(pid), 'stat')
with open(stat_path) as stat_file:
stats = stat_file.read()
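# /proc/<pid>/stat embeds the process name in parentheses, so everything up to
# the closing ") " is stripped before splitting; stats[19] is then the kernel's
# starttime field (field 22 of the stat line), which together with the pid
# identifies a unique process instance even across pid reuse.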
stats = re.sub(r'^[^)]*\) ', '', stats).split()
return int(stats[19])
except:
pass
def limited_pids(self):
path = self.group_path('pids')
result = 2**16
while path.startswith(self.system.mount_point('pids')):
pids_path = os.path.join(path, 'pids.max')
if os.path.exists(pids_path):
with open(pids_path) as pids_file:
pids = pids_file.readline().strip()
if pids != '' and pids != 'max':
result = min(result, int(pids))
path = os.path.dirname(path)
return result
def finished(self):
if self.pid_start_time(self.creator_pid) == self.creator_start_time:
return False
if len(self.list_group('pids')) > 0:
return False
return True
def __init__(self, registry, session_id, pid):
self.registry = registry
self.id = session_id
self.creator_pid = pid
self.creator_start_time = self.pid_start_time(self.creator_pid)
pid_groups = self.system.pid_groups(pid)
self.parent_groups = dict()
self.groups = dict()
for group in OBSERVER_CGROUPS:
self.parent_groups[group] = pid_groups[group]
self.groups[group] = os.path.join(self.parent_groups[group], self.group_name)
for group in OBSERVER_CGROUPS:
if group == 'memory':
with open(os.path.join(os.path.dirname(self.group_path(group)), 'memory.use_hierarchy')) as f:
use_hierarchy = bool(int(f.readline().strip()))  # memory.use_hierarchy contains "0" or "1"
assert use_hierarchy
os.makedirs(self.group_path(group), exist_ok=True)
if group == 'cpuset':
for inherit in ['cpuset.cpus', 'cpuset.mems']:
with open(os.path.join(os.path.dirname(self.group_path(group)), inherit)) as f:
with open(os.path.join(self.group_path(group), inherit), 'w') as t:
t.write(f.read())
logging.debug('Created session %s with paths [%s] for pid %s'%(self.id, ','.join(self.groups.values()), self.creator_pid))
self.start_time = time.perf_counter()
self.close_time = None
def attach(self, pid):
pid_groups = self.system.pid_groups(pid)
for group in OBSERVER_CGROUPS:
assert os.path.join(pid_groups[group], self.group_name) == self.groups[group]
for group in OBSERVER_CGROUPS:
tasks_path = self.group_path(group, filename='tasks')
assert os.path.isfile(tasks_path)
with open(tasks_path, 'w') as tasks_file:
tasks_file.write(str(pid))
logging.debug('Attached process %s to session %s'%(str(pid), self.id))
def detach(self, pid):
pid_groups = self.system.pid_groups(pid)
for group in OBSERVER_CGROUPS:
assert os.path.join(pid_groups[group], self.group_name) == self.groups[group] or pid_groups[group] == self.groups[group]
for group in OBSERVER_CGROUPS:
tasks_path = self.parent_group_path(group, filename='tasks')
assert os.path.isfile(tasks_path)
with open(tasks_path, 'w') as tasks_file:
tasks_file.write(str(pid))
logging.debug('Detached process %s from session %s'%(str(pid), self.id))
def limits(self, limits=KolejkaLimits()):
if limits.memory is not None:
assert 'memory' in self.groups
limit_file = self.group_path('memory', filename='memory.limit_in_bytes')
with open(limit_file, 'w') as f:
f.write(str(limits.memory))
logging.debug('Limited session %s memory to %s bytes'%(self.id, limits.memory))
if limits.swap is not None:
assert 'memory' in self.groups
limit_file = self.group_path('memory', filename='memory.memsw.limit_in_bytes')
with open(limit_file, 'w') as f:
f.write(str(limits.memory+limits.swap))
logging.debug('Limited session %s swap to %s bytes'%(self.id, limits.swap))
if limits.cpus is not None:
assert 'cpuset' in self.groups
cpuset_cpus = self.available_cpus()
logging.debug('Available cpus: %s', ','.join([str(c) for c in cpuset_cpus]))
cpus_offset = limits.cpus_offset or 0
if len(cpuset_cpus) < cpus_offset + limits.cpus:
cpus_offset = 0
if len(cpuset_cpus) > cpus_offset + limits.cpus:
cpuset_cpus = cpuset_cpus[cpus_offset:cpus_offset + limits.cpus]
limit_file = self.group_path('cpuset', filename='cpuset.cpus')
with open(limit_file, 'w') as f:
f.write(','.join([str(c) for c in cpuset_cpus]))
logging.debug('Limited session %s cpus to %s'%(self.id, ','.join([str(c) for c in cpuset_cpus])))
if limits.pids is not None:
assert 'pids' in self.groups
limit_file = self.group_path('pids', filename='pids.max')
with open(limit_file, 'w') as f:
f.write(str(limits.pids))
logging.debug('Limited session %s pids to %s'%(self.id, limits.pids))
if limits.time is not None:
self.close_time = self.start_time + limits.time.total_seconds()
logging.debug('Limited session %s time to %f'%(self.id, limits.time.total_seconds()))
else:
self.close_time = None
def freeze(self, freeze=True):
assert 'freezer' in self.groups
if freeze:
command = 'FROZEN'
else:
command = 'THAWED'
state_file = self.group_path('freezer', filename='freezer.state')
with open(state_file, 'w') as f:
f.write(command)
logging.debug('%s session %s'%(command, self.id))
if freeze:
while True:
with open(state_file) as f:
if f.readline().strip().lower() == 'frozen':
return
#TODO: wait for FROZEN. Is this code good?
def freezing(self):
assert 'freezer' in self.groups
state_file = self.group_path('freezer', filename='freezer.self_freezing')
with open(state_file) as f:
return f.readline().strip() == '1'
def stats(self):
stats = self.system.groups_stats(self.groups)
time_stats = KolejkaStats()
time_stats.time = datetime.timedelta(seconds = max(0, time.perf_counter() - self.start_time))
stats.update(time_stats)
return stats
def kill(self):
state = self.freezing()
self.freeze(freeze=True)
pids = self.list_group('pids')
for pid in pids:
try:
os.kill(int(pid), signal.SIGKILL)
except OSError:
pass
logging.debug('KILLED session %s'%(self.id))
self.freeze(freeze=state)
def close(self):
try:
self.kill()
except:
pass
try:
self.freeze(freeze=False)
except:
pass
time.sleep(0.1) #TODO: Allow thawed killed processes to die. HOW?
self.system.groups_close(self.groups)
logging.debug('CLOSED session %s'%(self.id))
class SessionRegistry:
def __init__(self):
self.sessions = dict()
self.session_stats = dict()
self.control_group_system = ControlGroupSystem()
self.salt = uuid.uuid4().hex
def cleanup(self):
for session_id in dict(self.sessions):
self.close(session_id)
def cleanup_finished(self):
current_time = time.perf_counter()
for session_id, session in list(self.sessions.items()):
if session.close_time is not None and session.close_time < current_time:
self.close(session_id)
if session.finished():
self.close(session_id)
for session_id, stats in list(self.session_stats.items()):
if session_id in self.sessions:
continue
stats_time = stats[1]
if stats_time + 300 < current_time:
del self.session_stats[session_id]
def open(self, session_id, pid):
if session_id not in self.sessions:
self.sessions[session_id] = Session(self, session_id, pid)
def attach(self, session_id, pid):
if session_id not in self.sessions:
self.sessions[session_id] = Session(self, session_id, pid)
return self.sessions[session_id].attach(pid=pid);
def detach(self, session_id, pid):
assert session_id in self.sessions
return self.sessions[session_id].detach(pid=pid);
def limits(self, session_id, limits=KolejkaLimits()):
assert session_id in self.sessions
return self.sessions[session_id].limits(limits=limits)
def stats(self, session_id):
if session_id in self.sessions:
current_time = time.perf_counter()
stats = self.session_stats.get(session_id, (KolejkaStats(), 0))[0]
stats.update(self.sessions[session_id].stats())
self.session_stats[session_id] = (stats, current_time)
return self.session_stats.get(session_id, (KolejkaStats(), 0))[0]
def freeze(self, session_id):
assert session_id in self.sessions
return self.sessions[session_id].freeze(freeze=True)
def thaw(self, session_id):
assert session_id in self.sessions
return self.sessions[session_id].freeze(freeze=False)
def kill(self, session_id):
if session_id not in self.sessions:
return
self.sessions[session_id].kill()
def close(self, session_id):
if session_id not in self.sessions:
return
try:
try:
self.stats(session_id)
except:
pass
self.sessions[session_id].close()
del self.sessions[session_id]
except:
pass
class ObserverServer(socketserver.ThreadingMixIn, HTTPUnixServer):
def __enter__(self, *args, **kwargs):
super().__enter__(*args, **kwargs)
self.session_registry = SessionRegistry()
return self
def __exit__(self, *args, **kwargs):
self.session_registry.cleanup()
super().__exit__(*args, **kwargs)
class ObserverHandler(http.server.BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.mute_log_request = False
super().__init__(*args, **kwargs)
@property
def session_registry(self):
return self.server.session_registry
def version_string(self):
return OBSERVER_SERVERSTRING
def send_json(self, result=None, code=200, message=None):
try:
result = json.dumps(result)
except:
logging.warning(traceback.format_exc())
self.send_error(500)
self.end_headers()
return
else:
result = bytes(result, 'utf-8')
self.send_response(200)
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.send_header('Content-Length', len(result))
self.end_headers()
self.wfile.write(result)
def log_request(self, *args, **kwargs):
if not self.mute_log_request:
super().log_request(*args, **kwargs)
def do_HEAD(self):
self.mute_log_request = True
self.session_registry.cleanup_finished()
self.send_response(200)
self.end_headers()
self.mute_log_request = False
def do_GET(self, params_override={}):
self.session_registry.cleanup_finished()
self.send_response(200)
self.end_headers()
def do_POST(self):
self.session_registry.cleanup_finished()
try:
post_data = dict()
if 'Content-Length' in self.headers:
post_length = int(self.headers['Content-Length'])
if 'Content-Type' in self.headers:
post_type, post_type_dict = cgi.parse_header(self.headers['Content-Type'])
assert post_type == 'application/json'
post_charset = post_type_dict.get('charset', 'utf-8')
post_data = json.loads(self.rfile.read(post_length).decode(post_charset))
url = urlparse(self.path)
path = url.path.strip('/ ').lower()
assert re.fullmatch(r'[a-z]*', path)
except:
logging.warning(traceback.format_exc())
self.send_error(400)
self.end_headers()
return
else:
return self.cmd(path, post_data)
def cmd(self, path, params):
check_session = False
fun = self.cmd_default
if path == '':
fun = self.cmd_root
elif path == 'open':
fun = self.cmd_open
elif path == 'attach':
fun = self.cmd_attach
if 'session_id' in params:
check_session = True
elif path == 'detach':
fun = self.cmd_detach
check_session = True
elif path == 'limits':
fun = self.cmd_limits
check_session = True
elif path == 'stats':
if 'session_id' not in params:
self.send_error(400)
self.end_headers()
return
fun = self.cmd_stats
elif path == 'freeze':
fun = self.cmd_freeze
check_session = True
elif path == 'thaw':
fun = self.cmd_thaw
check_session = True
elif path == 'kill':
fun = self.cmd_kill
check_session = True
elif path == 'close':
fun = self.cmd_close
check_session = True
if 'group' in params:
group = params['group']
if not re.fullmatch(r'[a-z0-9_.]*', group) or len(group) > 32:
self.send_error(403)
self.end_headers()
return
if check_session:
if not self.check_secret(params.get('session_id', ''), params.get('secret', '')):
self.send_error(403)
self.end_headers()
return
try:
result = fun(params)
except:
logging.warning(traceback.format_exc())
self.send_error(500)
self.end_headers()
return
else:
self.send_json(result)
def cmd_default(self, params):
raise Exception('method unknown')
def cmd_root(self, params):
result = dict()
result['sessions'] = len(self.session_registry.sessions)
result['status'] = 'ok'
return result
class std_params:
def __init__(self, params):
self.session_id = params.get('session_id', None)
self.secret = params.get('secret', None)
self.limits = KolejkaLimits()
self.limits.load(params.get('limits', {}))
def cmd_open(self, params):
result = dict()
params['session_id'] = self.generate_session_id()
params['secret'] = self.generate_secret(params['session_id'])
pid = int(self.client_address[0])
sparams = ObserverHandler.std_params(params)
self.session_registry.open(sparams.session_id, pid)
result['session_id'] = sparams.session_id
result['secret'] = sparams.secret
result['status'] = 'ok'
return result
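# Illustrative exchange (values are made up): POSTing an empty JSON object to
# /open yields {"session_id": "...", "secret": "...", "status": "ok"}; that
# id/secret pair must accompany later /detach, /limits, /freeze, /thaw, /kill
# and /close requests (cmd() verifies it via check_secret), while /stats only
# requires the session_id.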
def cmd_attach(self, params):
result = dict()
if 'session_id' not in params:
params['session_id'] = self.generate_session_id()
params['secret'] = self.generate_secret(params['session_id'])
:param pulumi.Input[str] display_name: An optional human-readable name for this notification channel. It is recommended that you specify a non-empty and unique name in order to make it easier to identify the channels in your project, though this is not enforced. The display name is limited to 512 Unicode characters.
:param pulumi.Input[bool] enabled: Whether notifications are forwarded to the described channel. This makes it possible to disable delivery of notifications to a particular channel without removing the channel from all alerting policies that reference the channel. This is a more convenient approach when the change is temporary and you want to receive notifications from the same set of alerting policies on the channel at some point in the future.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Configuration fields that define the channel and its behavior. The
permissible and required labels are specified in the
NotificationChannelDescriptor corresponding to the type field.
Labels with sensitive data are obfuscated by the API and therefore the provider cannot
determine if there are upstream changes to these fields. They can also be configured via
the sensitive_labels block, but cannot be configured in both places.
:param pulumi.Input[str] name: The full REST resource name for this channel. The syntax is: projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]. The
[CHANNEL_ID] is automatically assigned by the server on creation.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input['NotificationChannelSensitiveLabelsArgs'] sensitive_labels: Different notification type behaviors are configured primarily using the `labels` field on this
resource. This block contains the labels which contain secrets or passwords so that they can be marked
sensitive and hidden from plan output. The name of the field, eg: password, will be the key
in the `labels` map in the api request.
Credentials may not be specified in both locations and will cause an error. Changing from one location
to a different credential configuration in the config will require an apply to update state.
Structure is documented below.
:param pulumi.Input[str] type: The type of the notification channel. This field matches the value of the NotificationChannelDescriptor.type field. See https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannelDescriptors/list to get the list of valid values such as "email", "slack", etc...
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] user_labels: User-supplied key/value data that does not need to conform to the corresponding NotificationChannelDescriptor's schema, unlike the labels field. This field is intended to be used for organizing and identifying the NotificationChannel objects. The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.
:param pulumi.Input[str] verification_status: Indicates whether this channel has been verified or not. On a ListNotificationChannels or GetNotificationChannel
operation, this field is expected to be populated. If the value is UNVERIFIED, then it indicates that the channel is
non-functioning (it both requires verification and lacks verification); otherwise, it is assumed that the channel
works. If the channel is neither VERIFIED nor UNVERIFIED, it implies that the channel is of a type that does not require
verification or that this specific channel has been exempted from verification because it was created prior to
verification being required for channels of this type. This field cannot be modified using a standard
UpdateNotificationChannel operation. To change the value of this field, you must call VerifyNotificationChannel.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if sensitive_labels is not None:
pulumi.set(__self__, "sensitive_labels", sensitive_labels)
if type is not None:
pulumi.set(__self__, "type", type)
if user_labels is not None:
pulumi.set(__self__, "user_labels", user_labels)
if verification_status is not None:
pulumi.set(__self__, "verification_status", verification_status)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional human-readable description of this notification channel. This description may provide additional details, beyond the display name, for the channel. This may not exceed 1024 Unicode characters.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
An optional human-readable name for this notification channel. It is recommended that you specify a non-empty and unique name in order to make it easier to identify the channels in your project, though this is not enforced. The display name is limited to 512 Unicode characters.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether notifications are forwarded to the described channel. This makes it possible to disable delivery of notifications to a particular channel without removing the channel from all alerting policies that reference the channel. This is a more convenient approach when the change is temporary and you want to receive notifications from the same set of alerting policies on the channel at some point in the future.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Configuration fields that define the channel and its behavior. The
permissible and required labels are specified in the
NotificationChannelDescriptor corresponding to the type field.
Labels with sensitive data are obfuscated by the API and therefore the provider cannot
determine if there are upstream changes to these fields. They can also be configured via
the sensitive_labels block, but cannot be configured in both places.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The full REST resource name for this channel. The syntax is: projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]. The
[CHANNEL_ID] is automatically assigned by the server on creation.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="sensitiveLabels")
def sensitive_labels(self) -> Optional[pulumi.Input['NotificationChannelSensitiveLabelsArgs']]:
"""
Different notification type behaviors are configured primarily using the `labels` field on this
resource. This block contains the labels which contain secrets or passwords so that they can be marked
sensitive and hidden from plan output. The name of the field, e.g. password, will be the key
in the `labels` map in the API request.
Credentials may not be specified in both locations and will cause an error. Changing from one location
to a different credential configuration in the config will require an apply to update state.
Structure is documented below.
"""
return pulumi.get(self, "sensitive_labels")
@sensitive_labels.setter
def sensitive_labels(self, value: Optional[pulumi.Input['NotificationChannelSensitiveLabelsArgs']]):
pulumi.set(self, "sensitive_labels", value)
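# Illustrative usage sketch (hypothetical values; assumes the pulumi_gcp
# monitoring.NotificationChannel resource and its NotificationChannelSensitiveLabelsArgs
# input type with an auth_token field): keep the secret in sensitive_labels rather
# than labels so it is hidden from plan output.
#
#   import pulumi_gcp as gcp
#
#   channel = gcp.monitoring.NotificationChannel(
#       "slack-alerts",
#       type="slack",
#       display_name="#alerts",
#       labels={"channel_name": "#alerts"},
#       sensitive_labels=gcp.monitoring.NotificationChannelSensitiveLabelsArgs(
#           auth_token="PLACEHOLDER_TOKEN",  # placeholder secret, not a real value
#       ),
#   )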
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the notification channel. This field matches the value of the NotificationChannelDescriptor.type field. See https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.notificationChannelDescriptors/list to get the list of valid values such as "email", "slack", etc...
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="userLabels")
def user_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
User-supplied key/value data that does not need to conform to the corresponding NotificationChannelDescriptor's schema, unlike the labels field. This field is intended to be used for organizing and identifying the NotificationChannel objects. The field can contain up to 64 entries. Each key and value is limited to 63 Unicode characters or 128 bytes, whichever is smaller. Labels and values can contain only lowercase letters, numerals, underscores, and dashes. Keys must begin with a letter.
"""
return pulumi.get(self, "user_labels")
@user_labels.setter
def user_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "user_labels", value)
@property
@pulumi.getter(name="verificationStatus")
def verification_status(self) -> Optional[pulumi.Input[str]]:
"""
Indicates whether this channel has been verified or not. On a ListNotificationChannels or GetNotificationChannel
operation, this field is expected to be populated. If the value is UNVERIFIED, then it indicates that the channel is
non-functioning (it both requires verification and | |
# jiajiaxu123/Orca -- orca/core/indexes.py
import abc
import itertools
from typing import Iterable
import dolphindb as ddb
import numpy as np
import pandas as pd
from .common import default_session
from .datetimes import DatetimeProperties
from .internal import _ConstantSP, _InternalFrame, _InternalAccessor
from .operator import IndexLike, SeriesLike, ArithOpsMixin, StatOpsMixin, LogicalOpsMixin, IOOpsMixin
from .utils import (
_to_freq, dolphindb_temporal_types, _to_numpy_dtype,
is_dolphindb_uploadable, sql_select,
get_orca_obj_from_script)
class IndexOpsMixin(ArithOpsMixin, LogicalOpsMixin, metaclass=abc.ABCMeta):
def __init__(self, internal, session):
self._internal = internal
self._session = session
if isinstance(internal, _ConstantSP):
self._name = None
else:
names = [name[0] if name is not None else None
for _, name in internal.index_map]
if len(names) == 0:
self._name = None
self._names = None
elif len(names) == 1:
self._name = names[0]
self._names = None
elif len(names) > 1:
self._name = None
self._names = names
# self._dtype = internal.dtype
def __len__(self):
return len(self._internal)
# @property TODO: name getter and setter
# def name(self):
# return self.name
# @name.setter
# def name(self, name):
# self.rename(inplace=True)
@property
def _ddb_dtype(self):
if isinstance(self._internal, _ConstantSP):
return self._type
else:
index_column = self._index_column
return self._ddb_dtypes[index_column]
@property
def ndim(self):
return 1
@property
def size(self):
return len(self)
@property
def dtype(self):
return _to_numpy_dtype(self._ddb_dtype)
@abc.abstractmethod
def to_numpy(self):
pass
@abc.abstractmethod
def to_pandas(self):
pass
@property
def is_monotonic(self):
return self._unary_agg_op("isSorted", axis=None, level=None, numeric_only=False)
@property
def is_monotonic_increasing(self):
return self._unary_agg_op("isSorted", axis=None, level=None, numeric_only=False)
@property
def is_monotonic_decreasing(self):
return self._unary_agg_op("isSorted{,false}", axis=None, level=None, numeric_only=False)
@property
def is_unique(self):
len_self = len(self)
return len_self == 1 or self.nunique() == len_self
@property
def hasnans(self):
return self._unary_agg_op("hasNull", axis=None, level=None, numeric_only=False)
def _unary_op(self, *args, **kwargs):
return ArithOpsMixin._unary_op(self, *args, **kwargs)
def _binary_op(self, *args, **kwargs):
return ArithOpsMixin._binary_op(self, *args, **kwargs)
def _extended_binary_op(self, *args, **kwargs):
return ArithOpsMixin._extended_binary_op(self, *args, **kwargs)
def _logical_op(self, *args, **kwargs):
return LogicalOpsMixin._logical_op(self, *args, **kwargs)
def _logical_unary_op(self, *args, **kwargs):
return LogicalOpsMixin._logical_unary_op(self, *args, **kwargs)
def _to_script(self):
odf = self._internal
if isinstance(odf, _ConstantSP):
return self._var_name
select_list = self._index_columns
return sql_select(select_list, self._var_name)
# elif self._segmented:
# select_list = self._index_columns
# return sql_select(select_list, self._var_name, is_exec=True)
# else:
# assert len(self._index_columns) == 1
# var_name, column_name = self._var_name, self._index_column
# return f"{var_name}.{column_name}"
def _binary_op_on_different_indices(self, other, func, axis):
"""
Implementation of binary operator between Series on different
indices. A new Series representing an in-memory DolphinDB table
is returned. It is guaranteed that both Series have no where_expr.
Parameters
----------
other : _Frame
Right hand side of the operator.
func : str
Function name.
Returns
-------
orca.DataFrame
The result of the operation.
Raises
------
NotImplementedError
To be implemented.
"""
from .merge import _generate_joiner
_COLUMN_NAME = "ORCA_DIFFERENT_INDICES_COLUMN"
if other._is_series_like:
session = self._session
self_var_name, other_var_name = self._var_name, other._var_name
self_column_name = self._data_columns[0]
other_column_name = other._data_columns[0]
select_list = [f"{func}({self_var_name}.{self_column_name}, {other_var_name}.{other_column_name}) as {_COLUMN_NAME}"]
index_list, from_clause = _generate_joiner(
self_var_name, other_var_name, self._index_columns, other._index_columns)
select_list = itertools.chain(index_list, select_list)
script = sql_select(select_list, from_clause)
index_map = [(s_map[0], None if s_map[1] != o_map[1] else s_map[1])
for s_map, o_map
in zip(self._internal.index_map, other._internal.index_map)]
return self._get_from_script(
session, script, data_columns=[_COLUMN_NAME], index_map=index_map)
elif other._is_dataframe_like:
raise NotImplementedError()
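# Illustrative flow for the different-indices path (a sketch, assuming two orca
# Series s1 and s2 backed by different DolphinDB tables; 'add' is the DolphinDB
# function name, and the call is normally reached indirectly via s1 + s2):
#
#   result = s1._binary_op_on_different_indices(s2, 'add', axis=0)
#
# Internally this emits one SQL select that joins both tables on their index
# columns and computes add(lhs_column, rhs_column) as a new column.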
class Index(IndexLike, _InternalAccessor, IndexOpsMixin, IOOpsMixin):
"""
Accessor for DataFrame and Series.
When calling get_select_list, a specific identifier is added before the
column.
When names are not given, a specific identifier is used instead.
"""
def __init__(self, data, dtype=None, copy=False, name=None, tupleize_cols=None, session=default_session()):
if isinstance(data, _ConstantSP):
assert dtype is None
assert not copy
assert tupleize_cols is None
IndexOpsMixin.__init__(self, data, session)
self._name = name
elif isinstance(data, _InternalFrame):
assert dtype is None
assert name is None
assert not copy
assert tupleize_cols is None
IndexOpsMixin.__init__(self, data, session)
else:
if isinstance(data, (pd.Index, pd.Series)):
idx = (data if dtype is None and name is None and tupleize_cols is None
else pd.Index(data, dtype=dtype, name=name, tupleize_cols=tupleize_cols))
else:
idx = pd.Index(data=data, dtype=dtype, copy=False, name=name,
tupleize_cols=tupleize_cols) # TODO: copy = True or False ?, freq?
# var = _ConstantSP.upload_obj(session, idx.to_numpy())
# var._framize(name=idx.name)
# IndexOpsMixin.__init__(self, var, session)
# self._name = idx.name
odf = _InternalFrame.from_pandas(session, idx)
IndexOpsMixin.__init__(self, odf, session)
self._where_expr = None
def __repr__(self):
if self._segmented:
return "<.index.Index object representing a column in a DolphinDB segmented table>"
else:
return self.to_pandas().__repr__()
def __eq__(self, other):
if type(self) != type(other):
return False
else:
return (self._var_name == other._var_name
and self._index_columns == other._index_columns)
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def _from_internal(cls, odf, index=None):
"""
Create an orca Index indicated by an _InternalFrame and another
pandas or orca Index.
Parameters
----------
odf : _InternalFrame
odf provides the metadata of the represented DolphinDB table
and serves as the _internal attribute of the Index
index : pd.Index or orca.Index, optional
index provides the metadata such as name, frequency, etc. of
the Index, by default None
"""
session = odf._session
if index is None or not isinstance(index, pd.DatetimeIndex):
if odf.is_any_vector:
index = Index(index, session=session)
elif len(odf.index_map) == 1:
if odf._ddb_dtypes[odf._index_columns[0]] in dolphindb_temporal_types:
index = DatetimeIndex._from_internal(odf, index)
else:
index = Index(odf, session=session)
elif len(odf.index_map) == 0:
index = Index([], session=session)
else:
index = MultiIndex(odf, session=session)
elif isinstance(index, pd.DatetimeIndex):
index = DatetimeIndex._from_internal(odf, index)
else:
raise TypeError("Unsupported index type")
return index
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if value is not None and not isinstance(value, str):
raise TypeError("Index.name must be a string")
self._name = value
@property
def names(self):
return self._names
def rename(self, value, inplace=False):
raise NotImplementedError()
@property
def _index_column(self):
assert isinstance(self._internal, _InternalFrame)
return self._index_columns[0]
@property
def _index(self):
return self
def _get_data_select_list(self):
if isinstance(self._internal, _ConstantSP):
return [self._var_name]
else:
return self._index_columns
def _to_script_list(self):
if isinstance(self._internal, _ConstantSP):
assert(self._form != ddb.settings.DF_TABLE)
return [self._var_name]
else:
return [sql_select([col], self._var_name, is_exec=True)
for col in self._index_columns]
def to_pandas(self):
if isinstance(self._internal, _ConstantSP):
df = self._session.run(self._to_script())
return pd.Index(df).rename(self._name)
elif len(self._index_columns) == 0:
raise ValueError("Frame has no default index if it is not in memory")
else:
df = self._session.run(self._to_script())
return pd.Index(df.iloc[:,0]).rename(self._name)
def to_numpy(self):
return self.to_pandas().to_numpy()
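# NOTE: the method below redefines _get_data_select_list and therefore overrides
# the earlier definition above (which returned the bare index column names); the
# fully qualified "<table>.<column>" form below is the one that takes effect.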
def _get_data_select_list(self):
if isinstance(self._internal, _ConstantSP):
return [self._var_name]
else:
return [f"{self._var_name}.{self._index_column}"]
def _unary_agg_op(self, func, *args, **kwargs):
if isinstance(self._internal, _ConstantSP):
script = f"{func}({self._var_name})"
else:
index_column = self._index_column
select_list = [f"{func}({index_column})"]
script = sql_select(select_list, self._var_name, is_exec=True)
return get_orca_obj_from_script(self._session, script, [], as_index=True)
def min(self, axis=None, skipna=True, *args, **kwargs):
return self._unary_agg_op("min")
def max(self, axis=None, skipna=True, *args, **kwargs):
return self._unary_agg_op("max")
def unique(self, level=None):
pass
def nunique(self, dropna=True):
pass
isna = LogicalOpsMixin.isna
notna = LogicalOpsMixin.notna
isnull = LogicalOpsMixin.isnull
notnull = LogicalOpsMixin.notnull
fillna = StatOpsMixin.fillna
dropna = StatOpsMixin.dropna
# def _binary_op(self, other, func):
# from .frame import DataFrame
# from .series import Series
# if is_dolphindb_uploadable(self):
# raise NotImplementedError()
# elif not isinstance(self, Index):
# raise TypeError("Operand must be a Series")
# elif is_dolphindb_uploadable(other):
# raise NotImplementedError()
# elif isinstance(other, DataFrame):
# raise NotImplementedError()
# elif isinstance(other, Series):
# raise NotImplementedError()
# else:
# raise TypeError("Operand must be a Series or DataFrame")
# def _logical_op(self, other, func):
# raise NotImplementedError()
# def _logical_unary_op(self, func):
# raise NotImplementedError()
@property
def values(self):
#warnings.warn("orca objects does not store data in numpy arrays. Accessing values will retrive whole data from the remote node.", Warning)
return self.to_numpy()
@property
def shape(self):
return (len(self),)
@property
def nbytes(self):
session = self._session
script = sql_select(["bytes"], "objs()", where_expr=f"name='{self._var_name}'", is_exec=True)
script += "[0]"
return session.run(script)
@property
def ndim(self):
return 1
@property
def T(self):
return self
@property
def is_all_dates(self):
return False
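# Minimal usage sketch (assumes a reachable DolphinDB server behind the default
# orca session):
#
#   import pandas as pd
#   idx = Index(pd.Index([1, 2, 3], name='id'))
#   idx.to_pandas()           # materializes the index as a pandas.Index
#   idx.min(), idx.max()      # aggregations evaluated on the DolphinDB side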
class MultiIndex(Index):
def __init__(self, data, names=None, session=default_session()):
if isinstance(data, _InternalFrame):
assert names is None
Index.__init__(self, data, session=session)
elif isinstance(data, pd.MultiIndex):
assert names is None
frame = data.to_frame()
var = _ConstantSP.upload_obj(session, frame)
Index.__init__(self, var, session=session)
self._names = list(data.names)
@staticmethod
def _from_pandas_multiindex(session, index):
from .frame import DataFrame
return DataFrame(data=None, session=session, index=index).index
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None, session=default_session()):
return cls._from_pandas_multiindex(session, pd.MultiIndex.from_arrays(arrays, sortorder, names))
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None, session=default_session()):
return cls._from_pandas_multiindex(session, pd.MultiIndex.from_tuples(tuples, sortorder, names))
@classmethod
def from_product(cls, iterables, sortorder=None, names=None, session=default_session()):
return cls._from_pandas_multiindex(session, pd.MultiIndex.from_product(iterables, sortorder, names))
@classmethod
def from_frame(cls, df, sortorder=None, names=None, session=default_session()): # TODO: directly upload frame
return cls._from_pandas_multiindex(session, pd.MultiIndex.from_frame(df, sortorder, names))
@property
def names(self):
return self._names
@names.setter
def names(self, value):
raise NotImplementedError()
# def _to_script(self):
# select_list = self._index_columns
# return sql_select(select_list, self._var_name)
def to_pandas(self): # TODO: dealing with where clause
df = self._session.run(self._to_script())
return pd.MultiIndex.from_frame(df).rename(self.names)
def _unary_op(self, func):
raise TypeError(f"cannot perform {func} with this index type: MultiIndex")
def _binary_op(self, other, func):
raise TypeError(f"cannot perform {func} with this index type: MultiIndex")
def _logical_op(self, other, func):
raise TypeError(f"cannot perform {func} with this index type: MultiIndex")
def _logical_unary_op(self, func):
raise | |
<= 0)
m.c692 = Constraint(expr= m.x691 - m.b3007 <= 0)
m.c693 = Constraint(expr= m.x692 - m.b3007 <= 0)
m.c694 = Constraint(expr= m.x693 - m.b3007 <= 0)
m.c695 = Constraint(expr= m.x694 - m.b3007 <= 0)
m.c696 = Constraint(expr= m.x695 - m.b3007 <= 0)
m.c697 = Constraint(expr= m.x696 - m.b3007 <= 0)
m.c698 = Constraint(expr= m.x697 - m.b3007 <= 0)
m.c699 = Constraint(expr= m.x698 - m.b3007 <= 0)
m.c700 = Constraint(expr= m.x699 - m.b3007 <= 0)
m.c701 = Constraint(expr= m.x700 - m.b3007 <= 0)
m.c702 = Constraint(expr= m.x701 - m.b3008 <= 0)
m.c703 = Constraint(expr= m.x702 - m.b3008 <= 0)
m.c704 = Constraint(expr= m.x703 - m.b3008 <= 0)
m.c705 = Constraint(expr= m.x704 - m.b3008 <= 0)
m.c706 = Constraint(expr= m.x705 - m.b3008 <= 0)
m.c707 = Constraint(expr= m.x706 - m.b3008 <= 0)
m.c708 = Constraint(expr= m.x707 - m.b3008 <= 0)
m.c709 = Constraint(expr= m.x708 - m.b3008 <= 0)
m.c710 = Constraint(expr= m.x709 - m.b3008 <= 0)
m.c711 = Constraint(expr= m.x710 - m.b3008 <= 0)
m.c712 = Constraint(expr= m.x711 - m.b3008 <= 0)
m.c713 = Constraint(expr= m.x712 - m.b3008 <= 0)
m.c714 = Constraint(expr= m.x713 - m.b3008 <= 0)
m.c715 = Constraint(expr= m.x714 - m.b3008 <= 0)
m.c716 = Constraint(expr= m.x715 - m.b3008 <= 0)
m.c717 = Constraint(expr= m.x716 - m.b3008 <= 0)
m.c718 = Constraint(expr= m.x717 - m.b3008 <= 0)
m.c719 = Constraint(expr= m.x718 - m.b3008 <= 0)
m.c720 = Constraint(expr= m.x719 - m.b3008 <= 0)
m.c721 = Constraint(expr= m.x720 - m.b3008 <= 0)
m.c722 = Constraint(expr= m.x721 - m.b3008 <= 0)
m.c723 = Constraint(expr= m.x722 - m.b3008 <= 0)
m.c724 = Constraint(expr= m.x723 - m.b3008 <= 0)
m.c725 = Constraint(expr= m.x724 - m.b3008 <= 0)
m.c726 = Constraint(expr= m.x725 - m.b3008 <= 0)
m.c727 = Constraint(expr= m.x726 - m.b3008 <= 0)
m.c728 = Constraint(expr= m.x727 - m.b3008 <= 0)
m.c729 = Constraint(expr= m.x728 - m.b3008 <= 0)
m.c730 = Constraint(expr= m.x729 - m.b3008 <= 0)
m.c731 = Constraint(expr= m.x730 - m.b3008 <= 0)
m.c732 = Constraint(expr= m.x731 - m.b3008 <= 0)
m.c733 = Constraint(expr= m.x732 - m.b3008 <= 0)
m.c734 = Constraint(expr= m.x733 - m.b3008 <= 0)
m.c735 = Constraint(expr= m.x734 - m.b3008 <= 0)
m.c736 = Constraint(expr= m.x735 - m.b3008 <= 0)
m.c737 = Constraint(expr= m.x736 - m.b3008 <= 0)
m.c738 = Constraint(expr= m.x737 - m.b3008 <= 0)
m.c739 = Constraint(expr= m.x738 - m.b3008 <= 0)
m.c740 = Constraint(expr= m.x739 - m.b3008 <= 0)
m.c741 = Constraint(expr= m.x740 - m.b3008 <= 0)
m.c742 = Constraint(expr= m.x741 - m.b3008 <= 0)
m.c743 = Constraint(expr= m.x742 - m.b3008 <= 0)
m.c744 = Constraint(expr= m.x743 - m.b3008 <= 0)
m.c745 = Constraint(expr= m.x744 - m.b3008 <= 0)
m.c746 = Constraint(expr= m.x745 - m.b3008 <= 0)
m.c747 = Constraint(expr= m.x746 - m.b3008 <= 0)
m.c748 = Constraint(expr= m.x747 - m.b3008 <= 0)
m.c749 = Constraint(expr= m.x748 - m.b3008 <= 0)
m.c750 = Constraint(expr= m.x749 - m.b3008 <= 0)
m.c751 = Constraint(expr= m.x750 - m.b3008 <= 0)
m.c752 = Constraint(expr= m.x751 - m.b3008 <= 0)
m.c753 = Constraint(expr= m.x752 - m.b3008 <= 0)
m.c754 = Constraint(expr= m.x753 - m.b3008 <= 0)
m.c755 = Constraint(expr= m.x754 - m.b3008 <= 0)
m.c756 = Constraint(expr= m.x755 - m.b3008 <= 0)
m.c757 = Constraint(expr= m.x756 - m.b3008 <= 0)
m.c758 = Constraint(expr= m.x757 - m.b3008 <= 0)
m.c759 = Constraint(expr= m.x758 - m.b3008 <= 0)
m.c760 = Constraint(expr= m.x759 - m.b3008 <= 0)
m.c761 = Constraint(expr= m.x760 - m.b3008 <= 0)
m.c762 = Constraint(expr= m.x761 - m.b3008 <= 0)
m.c763 = Constraint(expr= m.x762 - m.b3008 <= 0)
m.c764 = Constraint(expr= m.x763 - m.b3008 <= 0)
m.c765 = Constraint(expr= m.x764 - m.b3008 <= 0)
m.c766 = Constraint(expr= m.x765 - m.b3008 <= 0)
m.c767 = Constraint(expr= m.x766 - m.b3008 <= 0)
m.c768 = Constraint(expr= m.x767 - m.b3008 <= 0)
m.c769 = Constraint(expr= m.x768 - m.b3008 <= 0)
m.c770 = Constraint(expr= m.x769 - m.b3008 <= 0)
m.c771 = Constraint(expr= m.x770 - m.b3008 <= 0)
m.c772 = Constraint(expr= m.x771 - m.b3008 <= 0)
m.c773 = Constraint(expr= m.x772 - m.b3008 <= 0)
m.c774 = Constraint(expr= m.x773 - m.b3008 <= 0)
m.c775 = Constraint(expr= m.x774 - m.b3008 <= 0)
m.c776 = Constraint(expr= m.x775 - m.b3008 <= 0)
m.c777 = Constraint(expr= m.x776 - m.b3008 <= 0)
m.c778 = Constraint(expr= m.x777 - m.b3008 <= 0)
m.c779 = Constraint(expr= m.x778 - m.b3008 <= 0)
m.c780 = Constraint(expr= m.x779 - m.b3008 <= 0)
m.c781 = Constraint(expr= m.x780 - m.b3008 <= 0)
m.c782 = Constraint(expr= m.x781 - m.b3008 <= 0)
m.c783 = Constraint(expr= m.x782 - m.b3008 <= 0)
m.c784 = Constraint(expr= m.x783 - m.b3008 <= 0)
m.c785 = Constraint(expr= m.x784 - m.b3008 <= 0)
m.c786 = Constraint(expr= m.x785 - m.b3008 <= 0)
m.c787 = Constraint(expr= m.x786 - m.b3008 <= 0)
m.c788 = Constraint(expr= m.x787 - m.b3008 <= 0)
m.c789 = Constraint(expr= m.x788 - m.b3008 <= 0)
m.c790 = Constraint(expr= m.x789 - m.b3008 <= 0)
m.c791 = Constraint(expr= m.x790 - m.b3008 <= 0)
m.c792 = Constraint(expr= m.x791 - m.b3008 <= 0)
m.c793 = Constraint(expr= m.x792 - m.b3008 <= 0)
m.c794 = Constraint(expr= m.x793 - m.b3008 <= 0)
m.c795 = Constraint(expr= m.x794 - m.b3008 <= 0)
m.c796 = Constraint(expr= m.x795 - m.b3008 <= 0)
m.c797 = Constraint(expr= m.x796 - m.b3008 <= 0)
m.c798 = Constraint(expr= m.x797 - m.b3008 <= 0)
m.c799 = Constraint(expr= m.x798 - m.b3008 <= 0)
m.c800 = Constraint(expr= m.x799 - m.b3008 <= 0)
m.c801 = Constraint(expr= m.x800 - m.b3008 <= 0)
m.c802 = Constraint(expr= m.x801 - m.b3009 <= 0)
m.c803 = Constraint(expr= m.x802 - m.b3009 <= 0)
m.c804 = Constraint(expr= m.x803 - m.b3009 <= 0)
m.c805 = Constraint(expr= m.x804 - m.b3009 <= 0)
m.c806 = Constraint(expr= m.x805 - m.b3009 <= 0)
m.c807 = Constraint(expr= m.x806 - m.b3009 <= 0)
m.c808 = Constraint(expr= m.x807 - m.b3009 <= 0)
m.c809 = Constraint(expr= m.x808 - m.b3009 <= 0)
m.c810 = Constraint(expr= m.x809 - m.b3009 <= 0)
m.c811 = Constraint(expr= m.x810 - m.b3009 <= 0)
m.c812 = Constraint(expr= m.x811 - m.b3009 <= 0)
m.c813 = Constraint(expr= m.x812 - m.b3009 <= 0)
m.c814 = Constraint(expr= m.x813 - m.b3009 <= 0)
m.c815 = Constraint(expr= m.x814 - m.b3009 <= 0)
m.c816 = Constraint(expr= m.x815 - m.b3009 <= 0)
m.c817 = Constraint(expr= m.x816 - m.b3009 <= 0)
m.c818 = Constraint(expr= m.x817 - m.b3009 <= 0)
m.c819 = Constraint(expr= m.x818 - m.b3009 <= 0)
m.c820 = Constraint(expr= m.x819 - m.b3009 <= 0)
m.c821 = Constraint(expr= m.x820 - m.b3009 <= 0)
m.c822 = Constraint(expr= m.x821 - m.b3009 <= 0)
m.c823 = Constraint(expr= m.x822 - m.b3009 <= 0)
m.c824 = Constraint(expr= m.x823 - m.b3009 <= 0)
m.c825 = Constraint(expr= m.x824 - m.b3009 <= 0)
m.c826 = Constraint(expr= m.x825 - m.b3009 <= 0)
m.c827 = Constraint(expr= m.x826 - m.b3009 <= 0)
m.c828 = Constraint(expr= m.x827 - m.b3009 <= 0)
m.c829 = Constraint(expr= m.x828 - m.b3009 <= 0)
m.c830 = Constraint(expr= m.x829 - m.b3009 <= 0)
m.c831 = Constraint(expr= m.x830 - m.b3009 <= 0)
m.c832 = Constraint(expr= m.x831 - m.b3009 <= 0)
m.c833 = Constraint(expr= m.x832 - m.b3009 <= 0)
m.c834 = Constraint(expr= m.x833 - m.b3009 <= 0)
m.c835 = Constraint(expr= m.x834 - m.b3009 <= 0)
m.c836 = Constraint(expr= m.x835 - m.b3009 <= 0)
m.c837 = Constraint(expr= m.x836 - m.b3009 <= 0)
m.c838 = Constraint(expr= m.x837 - m.b3009 <= 0)
m.c839 = Constraint(expr= m.x838 - m.b3009 <= 0)
m.c840 = Constraint(expr= m.x839 - m.b3009 <= 0)
m.c841 = Constraint(expr= m.x840 - m.b3009 <= 0)
m.c842 = Constraint(expr= m.x841 - m.b3009 <= 0)
m.c843 = Constraint(expr= m.x842 - m.b3009 <= 0)
m.c844 = Constraint(expr= m.x843 - m.b3009 <= 0)
m.c845 = Constraint(expr= m.x844 - m.b3009 <= 0)
m.c846 = Constraint(expr= m.x845 - m.b3009 <= 0)
m.c847 = Constraint(expr= m.x846 - m.b3009 <= 0)
m.c848 = Constraint(expr= m.x847 - m.b3009 <= 0)
m.c849 = Constraint(expr= m.x848 - m.b3009 <= 0)
m.c850 = Constraint(expr= m.x849 - m.b3009 <= 0)
m.c851 = Constraint(expr= m.x850 - m.b3009 <= 0)
m.c852 = Constraint(expr= m.x851 - m.b3009 <= 0)
m.c853 = Constraint(expr= m.x852 - m.b3009 <= 0)
m.c854 = Constraint(expr= m.x853 - m.b3009 <= 0)
m.c855 = Constraint(expr= m.x854 - m.b3009 <= 0)
m.c856 = Constraint(expr= m.x855 - m.b3009 <= 0)
m.c857 = Constraint(expr= m.x856 - m.b3009 <= 0)
m.c858 = Constraint(expr= m.x857 - m.b3009 <= 0)
m.c859 = Constraint(expr= m.x858 - m.b3009 <= 0)
m.c860 = Constraint(expr= m.x859 - m.b3009 <= 0)
m.c861 = Constraint(expr= m.x860 - m.b3009 <= 0)
m.c862 = Constraint(expr= m.x861 - m.b3009 <= 0)
m.c863 = Constraint(expr= m.x862 - m.b3009 <= 0)
m.c864 = Constraint(expr= m.x863 - m.b3009 <= 0)
m.c865 = Constraint(expr= m.x864 - m.b3009 <= 0)
m.c866 = Constraint(expr= m.x865 - m.b3009 <= 0)
m.c867 = Constraint(expr= m.x866 - m.b3009 <= 0)
m.c868 = Constraint(expr= m.x867 - m.b3009 <= 0)
m.c869 = Constraint(expr= m.x868 - m.b3009 <= 0)
m.c870 = Constraint(expr= m.x869 - m.b3009 <= 0)
m.c871 = Constraint(expr= m.x870 - m.b3009 <= 0)
m.c872 = Constraint(expr= m.x871 - m.b3009 <= 0)
m.c873 = Constraint(expr= m.x872 - m.b3009 <= 0)
m.c874 = Constraint(expr= m.x873 - | |
# -*- coding: utf-8 -*-
"""
Module et_micc2.project
=======================
An OO interface to *micc* projects.
"""
from copy import copy
import os, sys, site, subprocess, re
import sysconfig
import shutil
import json
from pathlib import Path
from operator import xor
import requests
from types import SimpleNamespace
from importlib import import_module
import click
import semantic_version
import et_micc2.config
import et_micc2.utils
import et_micc2.expand
import et_micc2.logger
from et_micc2.tomlfile import TomlFile
import pkg_resources
__FILE__ = Path(__file__).resolve()
def micc_version():
return et_micc2.__version__
def on_vsc_cluster():
"""test if we are running on one of the VSC clusters"""
try:
os.environ['VSC_HOME']
os.environ['VSC_INSTITUTE_CLUSTER']
except:
return False
else:
return True
def is_os_tool(path_to_exe):
"""test if path_to_exe was installed as part of the OS."""
return path_to_exe.startswith('/usr/bin')
class PkgInfo:
mock = [] # list of module names to pretend missing. This is just for testing purposes.
def __init__(self, pkg_name):
if pkg_name in PkgInfo.mock:
print(f'Mock: pretending module `{pkg_name}` is missing.')
self.which = ''
else:
try:
self.pkg_dist_info = pkg_resources.get_distribution(pkg_name)
except pkg_resources.DistributionNotFound:
self.which = ''
else:
self.which = self.pkg_dist_info.location
def is_available(self):
"""Return True if the tool is available, False otherwise."""
return bool(self.which)
def version(self):
"""Return the version string of the tool, or an empty string if the tool is not available."""
return self.pkg_dist_info.version if self.which else ''
__pybind11_required_version__ = '2.6.2'
class ToolInfo:
mock = [] # list of executable names to pretend missing. This is just for testing purposes.
def __init__(self, exe, accept_cluster_os_tools=False):
"""Check if tool 'exe' is available.
:param str exe: name of an executable
:param bool accept_cluster_os_tools: accept cluster operating system tools
Sets self.which to the location of the tool, or to an empty string if it is
not found or not accepted. The version string, as returned by 'exe --version',
is available through the version() method.
"""
self.exe = exe
if exe in ToolInfo.mock:
print(f'Mock: pretending tool `{exe}` is missing.')
self.which = ''
else:
# completed_which = subprocess.run(['which', exe], capture_output=True, text=True)
# self.which = completed_which.stdout.strip().replace('\n', ' ')
self.which = shutil.which(exe)
if self.which:
if on_vsc_cluster() and not accept_cluster_os_tools and is_os_tool(self.which):
self.which = ''
def is_available(self):
"""Return True if the tool is available, False otherwise."""
return bool(self.which)
def version(self):
"""Return the version string of the tool, or an empty string if the tool is not available."""
if self.which:
completed_version = subprocess.run([self.exe, '--version'], capture_output=True, text=True)
version = completed_version.stdout.strip().replace('\n\n','\n')#.replace('\n','\n    ')
else:
version = ''
return version
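# Usage sketch (illustrative only):
#
#   git = ToolInfo('git')
#   if git.is_available():
#       print(git.which, git.version())
#
#   pybind11 = PkgInfo('pybind11')
#   if pybind11.is_available():
#       print(pybind11.which, pybind11.version())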
_exit_missing_component = -1
def is_project_directory(path, project=None):
"""Verify that the directory :file:`path` is a project directory.
:param Path path: path to a directory.
:param Project project: if not None these variables are set:
* project.project_name
* project.package_name
* project.pyproject_toml
:returns: bool.
As a sufficient condition, we require that
* there is a pyproject.toml file, exposing the project's name :py:obj:`['tool']['poetry']['name']`
* there is a Python package or module with that name, converted by :py:meth:`pep8_module_name`.
"""
if not isinstance(path, Path):
path = Path(path)
path_to_pyproject_toml = str(path / 'pyproject.toml')
try:
pyproject_toml = TomlFile(path_to_pyproject_toml)
if not project is None:
project.pyproject_toml = pyproject_toml
# project.project_name = project_name
except Exception:
return False
return verify_project_structure(path, project)
def verify_project_structure(path, project=None):
"""Verify that there is either a Python module :file:`<package_name>.py`, or
a package :file:`<package_name>/__init__.py` (and not both).
:returns: True if exactly one of the two was found, False if neither or both
were found (in which case an error is reported when ``project`` is given).
On success and with ``project`` given, ``project.context.package_name`` is set.
"""
package_name = et_micc2.utils.pep8_module_name(path.name)
module = path / (package_name + ".py")
module = str(module.relative_to(path)) if module.is_file() else ""
package = path / package_name / "__init__.py"
package = str(package.relative_to(path)) if package.is_file() else ""
if package and module:
if project:
error(f"Package ({package_name}/__init__.py) and module ({package_name}.py) found.")
return False
elif (not module and not package):
if project:
error(f"Neither package ({package_name}/__init__.py) nor module ({package_name}.py) found.")
return False
else:
if project:
project.context.package_name = package_name
return True
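# Illustrative check of a project directory (hypothetical path; the object passed
# in only needs a `context` attribute so that the package name can be set on it):
#
#   from pathlib import Path
#   from types import SimpleNamespace
#
#   proj = SimpleNamespace(context=SimpleNamespace())
#   if is_project_directory(Path('/path/to/my-project'), proj):
#       print(proj.context.package_name)   # e.g. 'my_project'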
def error(msg, exit_code=1, raise_runtimeerror=True):
"""Print an error message and optionally raise a RuntimeError carrying the exit code.
:param str msg: the error message
:param int exit_code: the exit code passed along with the RuntimeError
:param bool raise_runtimeerror: raise RuntimeError if True
"""
click.secho("[ERROR]\n" + msg, fg='bright_red')
if raise_runtimeerror:
raise RuntimeError(msg,exit_code)
def warning(msg):
"""Print a warning message ``msg``."""
click.secho("[WARNING]\n" + msg, fg='green')
def ask_user_to_continue_or_not(default=False, stop_message='Exiting.'):
"""Ask the user if he wants to continue or stop a command.
If the answer is to stop, the stop_message is reported via error(), which
raises a RuntimeError with exit code _exit_missing_component.
:param bool default: The answer if the user just presses enter.
:return: None; the function only returns if the user wants to continue.
"""
if default == True:
question = 'Continue? [Yes]/No'
else:
question = 'Continue? [No]/Yes'
answer = input(question)
if not answer:
answer = default
else:
answer = answer.lower()
answer = True if answer.startswith('y') else False
if not answer:
error(stop_message, exit_code=_exit_missing_component)
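# Typical call site (illustrative): abort the current command unless the user
# explicitly confirms, defaulting to "No".
#
#   ask_user_to_continue_or_not(default=False, stop_message='Project not created.')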
class Project:
"""
An OO interface to *micc* projects.
:param types.SimpleNamespace context: all options from the ``micc`` CLI.
"""
def __init__(self, context):
self.context = context
if hasattr(context, 'template_parameters'):
# only needed for expanding templates.
# Pick up the default parameters
parameters = self.context.preferences
parameters.update(context.template_parameters)
context.template_parameters = parameters
self.logger = None
if is_project_directory(self.context.project_path, self):
self.get_logger()
else:
# Not a project directory, only create and setup subcommands can work,
# (but setup does not construct a Project object).
if not self.context.invoked_subcommand in ('create',):
error(f'Not a project directory: `{self.context.project_path}`')
def create_cmd(self):
"""Create a new project skeleton."""
# Check for tools needed:
# . git is required for creating a local repo
# . gh is required for creating a remote repo
if self.context.project_path.exists() and os.listdir(str(self.context.project_path)):
error(
f"Cannot create project in ({self.context.project_path}):\n"
f" Directory must be empty."
)
toolinfo_git = ToolInfo('git')
if not self.context.no_git and not toolinfo_git.is_available():
if on_vsc_cluster():
warning(
'Your current environment has no suitable git command.\n'
'Load a cluster module that has git.\n'
'If you continue, this project will NOT have a local git repository.'
)
else:
warning(
'Your current environment has no git command.\n'
'To install git: https://git-scm.com/downloads.\n'
'If you continue, this project will NOT have a local git repository.'
)
self.ask_user_to_continue_or_not(stop_message='Project not created.')
if self.context.remote != 'none':
# Check that we have github username
github_username = self.context.template_parameters['github_username']
if not github_username:
error(
'Micc2 configuration does not have a github username. Creation of remote repo is not possible.\n'
'Project is not created.'
)
# Check availability of gh command:
if not ToolInfo('gh').is_available() and self.context.remote:
warning(
'The gh command is not available in your environment.\n'
'If you continue, a remote repository will NOT be created for this project.'
)
self.ask_user_to_continue_or_not(stop_message='Project not created.')
if not self.context.allow_nesting:
# Prevent the creation of a project inside another project
p = self.context.project_path.parent.resolve()
while not p.samefile(os.sep):
if is_project_directory(p):
error(
f"Cannot create project in ({self.context.project_path}):\n"
f" Specify '--allow-nesting' to create an et_micc2 project inside another et_micc2 project ({p})."
)
p = p.parent
# Proceed creating the project
self.context.project_path.mkdir(parents=True, exist_ok=True)
if not self.context.module_name:
# derive package name from project name
if not et_micc2.utils.verify_project_name(self.context.project_path.name):
error(
f"The project name ({self.context.project_path.name}) does not yield a PEP8 compliant module name:\n"
f" The project name must start with a letter, and contain only letters, digits, hyphens and underscores.\n"
f" Alternatively, provide an explicit module name with --module-name=<name>."
)
else:
self.context.package_name = et_micc2.utils.pep8_module_name(self.context.project_path.name)
else:
self.context.package_name = self.context.module_name
try:
relative_project_path = self.context.project_path.relative_to(Path.cwd())
except ValueError:
# project_path was specified relative to cwd using ../
# use full path instead of relative path
relative_project_path = self.context.project_path
if self.context.publish:
rv = et_micc2.utils.existsOnPyPI(self.context.package_name)
if rv is False:
pass # the name is not yet in use
else:
if rv is True:
error(
f" The name '{self.context.package_name}' is already in use on PyPI.\n"
f" The project is not created.\n"
f" You must choose another name if you want to publish your code on PyPI."
)
elif isinstance(rv, requests.exceptions.ConnectionError):
error(f" ConnectionError: Check your internet connection.\n"
f" The availability of name '{self.context.package_name}' on PyPI could not be verified. \n"
f" The project is not created."
)
else: # unknown error
error(
f" {type(rv)}\n"
f" {str(rv)}\n"
f" The availability of name '{self.context.package_name}' on PyPI could not be verified. \n"
f" The project is not created."
)
source_file = str(relative_project_path / self.context.package_name / '__init__.py')
self.context.verbosity = max(1, self.context.verbosity)
# The project directory is created, so we can get ourselves a logger:
self.get_logger()
with et_micc2.logger.logtime(self):
with et_micc2.logger.log( | |
MFnDagNode_objectColorIndex(*args, **kwargs):
pass
def MDataHandle_asBool(*args, **kwargs):
pass
def MDagMessage_addChildReorderedCallback(*args, **kwargs):
pass
def MProfiler_swigregister(*args, **kwargs):
pass
def delete_MItDag(*args, **kwargs):
pass
def MFnNurbsCurve_getKnotDomain(*args, **kwargs):
pass
def MFnBlinnShader_setSpecularRollOff(*args, **kwargs):
pass
def MPlug_connectedTo(*args, **kwargs):
pass
def new_MFileIO(*args, **kwargs):
pass
def MTesselationParams_setFormatType(*args, **kwargs):
pass
def MArrayDataBuilder_assign(*args, **kwargs):
pass
def MItSurfaceCV_className(*args, **kwargs):
pass
def MFnSubd_getConnectedShaders(*args, **kwargs):
pass
def MFnLayeredShader_type(*args, **kwargs):
pass
def MFnDagNode_instanceCount(*args, **kwargs):
pass
def MWeight_influence(*args, **kwargs):
pass
def MCurveAttribute_hasIndex(*args, **kwargs):
pass
def MProfiler_getThreadId(*args, **kwargs):
pass
def MFnSpotLight_setPenumbraAngle(*args, **kwargs):
pass
def MItDag_item(*args, **kwargs):
pass
def MPlugArray_assign(*args, **kwargs):
pass
def MFnNurbsCurve_type(*args, **kwargs):
pass
def new_MEvaluationNode(*args, **kwargs):
pass
def MSyntax_makeFlagMultiUse(*args, **kwargs):
pass
def MArgList_asIntArray(*args, **kwargs):
pass
def MFnMesh_deleteVertex(*args, **kwargs):
pass
def MVector_length(*args, **kwargs):
pass
def MContainerMessage_addBoundAttrCallback(*args, **kwargs):
pass
def MFileObject_name(*args, **kwargs):
pass
def delete_MItMeshPolygon(*args, **kwargs):
pass
def MArgDatabase_getCommandArgument(*args, **kwargs):
pass
def MVector___call__(*args, **kwargs):
pass
def MComputation_endComputation(*args, **kwargs):
pass
def MTesselationParams_assign(*args, **kwargs):
pass
def MEulerRotation_invertIt(*args, **kwargs):
pass
def MArgParser_flagArgumentMTime(*args, **kwargs):
pass
def MItDependencyNodes_thisNode(*args, **kwargs):
pass
def MAttributeSpec_setName(*args, **kwargs):
pass
def MVectorArray_assign(*args, **kwargs):
pass
def MEulerRotation_asQuaternion(*args, **kwargs):
pass
def MFnMesh_extrudeFaces(*args, **kwargs):
pass
def MAngle_uiToInternal(*args, **kwargs):
pass
def MItMeshVertex_swigregister(*args, **kwargs):
pass
def MUserEventMessage_swigregister(*args, **kwargs):
pass
def MUint64Array_setSizeIncrement(*args, **kwargs):
pass
def new_MSceneMessage(*args, **kwargs):
pass
def MTesselationParams_swigregister(*args, **kwargs):
pass
def delete_MFnDependencyNode(*args, **kwargs):
pass
def MPlug_asMDataHandle(*args, **kwargs):
pass
def MItDependencyNodes_className(*args, **kwargs):
pass
def MFnCamera_setEyePoint(*args, **kwargs):
pass
def MURI_getDirectory(*args, **kwargs):
pass
def MFnDagNode_setObject(*args, **kwargs):
pass
def MFnAttribute_isUsedAsFilename(*args, **kwargs):
pass
def new_MDistance(*args, **kwargs):
pass
def MEdit_isFailed(*args, **kwargs):
pass
def delete_MMessage(*args, **kwargs):
pass
def MFnCameraSet_className(*args, **kwargs):
pass
def MUintArray___repr__(*args, **kwargs):
pass
def MPlug_isCachingFlagSet(*args, **kwargs):
pass
def MDAGDrawOverrideInfo_fPlaybackVisible_get(*args, **kwargs):
pass
def new_array3dInt(*args, **kwargs):
pass
def MFnSpotLight_setEndDistance(*args, **kwargs):
pass
def delete_MUintArray(*args, **kwargs):
pass
def MFnNurbsCurve_isParamOnCurve(*args, **kwargs):
pass
def delete_MFnNonAmbientLight(*args, **kwargs):
pass
def MFnAttribute_usesArrayDataBuilder(*args, **kwargs):
pass
def new_MColorArray(*args, **kwargs):
pass
def delete_MFnComponentListData(*args, **kwargs):
pass
def MUint64Array_clear(*args, **kwargs):
pass
def MPlugArray_length(*args, **kwargs):
pass
def MCommandMessage_addCommandOutputFilterCallback(*args, **kwargs):
pass
def MPlug_isFromReferencedFile(*args, **kwargs):
pass
def MObjectHandle___eq__(*args, **kwargs):
pass
def MQuaternion_z_get(*args, **kwargs):
pass
def delete_MFnAmbientLight(*args, **kwargs):
pass
def MQuaternion___eq__(*args, **kwargs):
pass
def MItMeshEdge_getLength(*args, **kwargs):
pass
def MItEdits_currentEditString(*args, **kwargs):
pass
def MFnPhongEShader_setRoughness(*args, **kwargs):
pass
def new_MTrimBoundaryArray(*args, **kwargs):
pass
def MObjectArray_length(*args, **kwargs):
pass
def MFnMesh_numColors(*args, **kwargs):
pass
def MScriptUtil_setUintArray(*args, **kwargs):
pass
def delete_MQuaternion(*args, **kwargs):
pass
def MItMeshEdge_setPoint(*args, **kwargs):
pass
def MTransformationMatrix_setScalePivot(*args, **kwargs):
pass
def MFloatArray_clear(*args, **kwargs):
pass
def MNurbsIntersector_getClosestPoint(*args, **kwargs):
pass
def MItInstancer_instancerPath(*args, **kwargs):
pass
def MCommandMessage_swigregister(*args, **kwargs):
pass
def MFnNurbsSurface_numPatchesInU(*args, **kwargs):
pass
def MEulerRotation_y_get(*args, **kwargs):
pass
def MTransformationMatrix_eulerRotation(*args, **kwargs):
pass
def MNodeMessage_addAttributeAddedOrRemovedCallback(*args, **kwargs):
pass
def MFnVectorArrayData_length(*args, **kwargs):
pass
def MPlug_constructHandle(*args, **kwargs):
pass
def MDGModifier_removeMultiInstance(*args, **kwargs):
pass
def shortPtr_frompointer(*args, **kwargs):
pass
def MFnNurbsSurface_distanceToPoint(*args, **kwargs):
pass
def MFnCamera_horizontalRollPivot(*args, **kwargs):
pass
def MTimerMessage_setSleepCallback(*args, **kwargs):
pass
def MNodeClass_typeName(*args, **kwargs):
pass
def MFnMesh_getColorRepresentation(*args, **kwargs):
pass
def MItGeometry_component(*args, **kwargs):
pass
def MDataHandle_asGenericInt(*args, **kwargs):
pass
def charPtr_assign(*args, **kwargs):
pass
def MFloatArray_className(*args, **kwargs):
pass
def MTime___truediv__(*args, **kwargs):
pass
def MNamespace_parentNamespace(*args, **kwargs):
pass
def MDataHandle_set2Short(*args, **kwargs):
pass
def MFloatArray___len__(*args, **kwargs):
pass
def MMeshSmoothOptions_setOpenSubdivCreaseMethod(*args, **kwargs):
pass
def MFnSet_removeMember(*args, **kwargs):
pass
def MItEdits_reset(*args, **kwargs):
pass
def MFileIO_unloadReference(*args, **kwargs):
pass
def MTime_setValue(*args, **kwargs):
pass
def MAttributeSpecArray_append(*args, **kwargs):
pass
def MMatrixArray_className(*args, **kwargs):
pass
def MPlug_setMDataHandle(*args, **kwargs):
pass
def MFnTypedAttribute_getDefault(*args, **kwargs):
pass
def MFnNonExtendedLight_setUseDepthMapShadows(*args, **kwargs):
pass
def MFnMatrixData_matrix(*args, **kwargs):
pass
def MDataHandle_asSubdSurfaceTransformed(*args, **kwargs):
pass
def MScriptUtil_asInt4Ptr(*args, **kwargs):
pass
def MItDependencyGraph_getPlugsVisited(*args, **kwargs):
pass
def MFnNurbsSurface_className(*args, **kwargs):
pass
def MFnStringData_type(*args, **kwargs):
pass
def MFnCamera_getFilmFrustum(*args, **kwargs):
pass
def MEulerRotation_z_set(*args, **kwargs):
pass
def MTimeArray_setLength(*args, **kwargs):
pass
def MAttributeIndex_setValue(*args, **kwargs):
pass
def MMatrix_matrix_get(*args, **kwargs):
pass
def delete_MFnTripleIndexedComponent(*args, **kwargs):
pass
def MFnMatrixArrayData_array(*args, **kwargs):
pass
def MDataHandle_asShort3(*args, **kwargs):
pass
def MFnAttribute_setIndeterminant(*args, **kwargs):
pass
def MScriptUtil_asInt(*args, **kwargs):
pass
def MItDependencyGraph_isTraversalDepthFirst(*args, **kwargs):
pass
def MFnNurbsCurve_distanceToPoint(*args, **kwargs):
pass
def MFileIO_getReferences(*args, **kwargs):
pass
def MTesselationParams_set3DDelta(*args, **kwargs):
pass
def MArrayDataHandle_assign(*args, **kwargs):
pass
def MMatrix___iadd__(*args, **kwargs):
pass
def new_MComputation(*args, **kwargs):
pass
def MFnSubdNames_baseFaceIndexFromId(*args, **kwargs):
pass
def MFnLayeredShader_swigregister(*args, **kwargs):
pass
def MFnMesh_syncObject(*args, **kwargs):
pass
def MFnDagNode_hiliteColor(*args, **kwargs):
pass
def MDataHandle_asChar(*args, **kwargs):
pass
def new_MProfilingScope(*args, **kwargs):
pass
def MItDag_className(*args, **kwargs):
pass
def MFnNurbsCurve_getKnots(*args, **kwargs):
pass
def MFnBlinnShader_swigregister(*args, **kwargs):
pass
def MPlug_setInt(*args, **kwargs):
pass
def MTesselationParams_setOutputType(*args, **kwargs):
pass
def delete_MArrayDataBuilder(*args, **kwargs):
pass
def MItSurfaceCV_swigregister(*args, **kwargs):
pass
def MFnSubd_tesselate(*args, **kwargs):
pass
def MFnAssembly_setRepName(*args, **kwargs):
pass
def delete_MFnLayeredShader(*args, **kwargs):
pass
def MFnDagNode_duplicate(*args, **kwargs):
pass
def MWeight_seam(*args, **kwargs):
pass
def MProfiler_getCPUId(*args, **kwargs):
pass
def MFnStringData_string(*args, **kwargs):
pass
def MFnCamera_farClippingPlane(*args, **kwargs):
pass
def delete_MFnNurbsCurve(*args, **kwargs):
pass
def MFnReflectShader_setReflectedRayDepthLimit(*args, **kwargs):
pass
def MEvaluationNode_iterator(*args, **kwargs):
pass
def MSyntax_makeFlagQueryWithFullArgs(*args, **kwargs):
pass
def MArgList_asDoubleArray(*args, **kwargs):
pass
def new_MItSurfaceCV(*args, **kwargs):
pass
def MFnSubd_polygonVertices(*args, **kwargs):
pass
def MFnAttribute_setUsedAsFilename(*args, **kwargs):
pass
def MFnIntArrayData_className(*args, **kwargs):
pass
def MFnDagNode_addChild(*args, **kwargs):
pass
def MVector_normal(*args, **kwargs):
pass
def MProfiler_addCategory(*args, **kwargs):
pass
def delete_MItCurveCV(*args, **kwargs):
pass
def MFnNumericData_setData2Int(*args, **kwargs):
pass
def MFnLambertShader_ambientColor(*args, **kwargs):
pass
def MFloatArray___radd__(*args, **kwargs):
pass
def MEulerRotation_order_set(*args, **kwargs):
pass
def MStreamUtils_writeInt(*args, **kwargs):
pass
def MFnLight_setShadowColor(*args, **kwargs):
pass
def MArgDatabase_getObjects(*args, **kwargs):
pass
def MItSubdFace_isValid(*args, **kwargs):
pass
def MFnSubd_creasesGetAll(*args, **kwargs):
pass
def MFnDependencyNode_dgTimerOn(*args, **kwargs):
pass
def MComputation_setProgressRange(*args, **kwargs):
pass
def MPoint___eq__(*args, **kwargs):
pass
def MPlug_setMAngle(*args, **kwargs):
pass
def MInt64Array_clear(*args, **kwargs):
pass
def MFnNumericAttribute_swigregister(*args, **kwargs):
pass
def MFnLambertShader_type(*args, **kwargs):
pass
def MEulerRotation_reorder(*args, **kwargs):
pass
def MParentingEdit_parentedObject(*args, **kwargs):
pass
def MArgParser_getFlagArgumentPosition(*args, **kwargs):
pass
def MItSubdEdge_next(*args, **kwargs):
pass
def MFnSubd_vertexEditSet(*args, **kwargs):
pass
def MIteratorType_getFilterList(*args, **kwargs):
pass
def MFnGenericAttribute_className(*args, **kwargs):
pass
def MFnAssembly_getRepNamespace(*args, **kwargs):
pass
def MFnDependencyNode_isFlagSet(*args, **kwargs):
pass
def MVectorArray_set(*args, **kwargs):
pass
def MPointArray_setSizeIncrement(*args, **kwargs):
pass
def MIntArray___repr__(*args, **kwargs):
pass
def MFnNumericAttribute_create(*args, **kwargs):
pass
def MFnAttribute_setAffectsAppearance(*args, **kwargs):
pass
def MFnMesh_freeCachedIntersectionAccelerator(*args, **kwargs):
pass
def MEulerRotation_asMatrix(*args, **kwargs):
pass
def MSelectionList_merge(*args, **kwargs):
pass
def MAngle_className(*args, **kwargs):
pass
def MItSelectionList_isDone(*args, **kwargs):
pass
def MFnNumericData_getData3Short(*args, **kwargs):
pass
def MFnSubd_polygonBaseMeshAdd(*args, **kwargs):
pass
def MFnExpression_evaluate(*args, **kwargs):
pass
def MFnDependencyNode_getAffectedByAttributes(*args, **kwargs):
pass
def new_MUuid(*args, **kwargs):
pass
def MMessageNode_swigregister(*args, **kwargs):
pass
def MPlane_className(*args, **kwargs):
pass
def delete_MFloatMatrix(*args, **kwargs):
pass
def MIntArray_set(*args, **kwargs):
pass
def MFnMesh_swigregister(*args, **kwargs):
pass
def MFnAttribute_internal(*args, **kwargs):
pass
def MDoubleArray_className(*args, **kwargs):
pass
def delete_MSceneMessage(*args, **kwargs):
pass
def new_MAngle(*args, **kwargs):
pass
def MItMeshVertex_geomChanged(*args, **kwargs):
pass
def MFnStringData_set(*args, **kwargs):
pass
def MFnEnumAttribute_getMin(*args, **kwargs):
pass
def MComputation_progress(*args, **kwargs):
pass
def MFnDependencyNode_className(*args, **kwargs):
pass
def MURI_className(*args, **kwargs):
pass
def MColor_swigregister(*args, **kwargs):
pass
def MPlug_asDouble(*args, **kwargs):
pass
def MImage_readDepthMap(*args, **kwargs):
pass
def MFnMesh_setDoubleBlindData(*args, **kwargs):
pass
def MFnAttribute_indexMatters(*args, **kwargs):
pass
def MDistance_setInternalUnit(*args, **kwargs):
pass
def MRichSelection_getSymmetryMatrix(*args, **kwargs):
pass
def new_MItGeometry(*args, **kwargs):
pass
def MFn_swigregister(*args, **kwargs):
pass
def MTransformationMatrix_assign(*args, **kwargs):
pass
def MFnSpotLight_swigregister(*args, **kwargs):
pass
def new_MFnDoubleIndexedComponent(*args, **kwargs):
pass
def MFloatVector_isParallel(*args, **kwargs):
pass
def MURI_getAuthority(*args, **kwargs):
pass
def MColor___truediv__(*args, **kwargs):
pass
def MPlug_setShort(*args, **kwargs):
pass
def MImageFileInfo_swigregister(*args, **kwargs):
pass
def MFnMesh_intersect(*args, **kwargs):
pass
def MFnMesh_createBlindDataType(*args, **kwargs):
pass
def MFnArrayAttrsData_stringArray(*args, **kwargs):
pass
def MConnectDisconnectAttrEdit_dstPlug(*args, **kwargs):
pass
def array2dDouble_swigregister(*args, **kwargs):
pass
def MItMeshPolygon_zeroArea(*args, **kwargs):
pass
def MFnSpotLight_coneAngle(*args, **kwargs):
pass
def MFnCameraSet_getLayerClearDepthValue(*args, **kwargs):
pass
def MFnAttribute_parent(*args, **kwargs):
pass
def MFloatVector___truediv__(*args, **kwargs):
pass
def MDistance_setUnit(*args, **kwargs):
pass
def MColorArray_insert(*args, **kwargs):
pass
def MPlug_setCaching(*args, **kwargs):
pass
def MIffFile_className(*args, **kwargs):
pass
def MFnMesh_getVertexColors(*args, **kwargs):
pass
def MFnAreaLight_swigregister(*args, **kwargs):
pass
def MDGMessage_addConnectionCallback(*args, **kwargs):
pass
def MEdit_isTopLevel(*args, **kwargs):
pass
def MConditionMessage_addConditionCallback(*args, **kwargs):
pass
def new_array3dFloat(*args, **kwargs):
pass
def MItMeshPolygon_hasValidTriangulation(*args, **kwargs):
pass
def MFnSingleIndexedComponent_getElements(*args, **kwargs):
pass
def new_MFnCameraSet(*args, **kwargs):
pass
def MFloatVectorArray_remove(*args, **kwargs):
pass
def MUintArray___eq__(*args, **kwargs):
pass
def MPlug_elementByLogicalIndex(*args, **kwargs):
pass
def new_MGlobal(*args, **kwargs):
pass
def MPlug_isSource(*args, **kwargs):
pass
def MFnMesh_setCurrentColorSetName(*args, **kwargs):
pass
def MSyntax_setMinObjects(*args, **kwargs):
pass
def MFnNonExtendedLight_setDepthMapFilterSize(*args, **kwargs):
pass
def MDGContext_isNormal(*args, **kwargs):
pass
def MRenderPassDef_getGroup(*args, **kwargs):
pass
def delete_array3dInt(*args, **kwargs):
pass
def MItMeshPolygon_tangentIndex(*args, **kwargs):
pass
def MFnSet_removeMembers(*args, **kwargs):
pass
def MFnCompoundAttribute_numChildren(*args, **kwargs):
pass
def new_MFnSubd(*args, **kwargs):
pass
def MFloatPoint_x_set(*args, **kwargs):
pass
def MUintArray_assign(*args, **kwargs):
pass
def MPlug_isNetworked(*args, **kwargs):
pass
def MGlobal_optionVarDoubleValue(*args, **kwargs):
pass
def MFnMesh_getUV(*args, **kwargs):
pass
def MFnNonAmbientLight_className(*args, **kwargs):
pass
def MDagPath_swigregister(*args, **kwargs):
pass
def MRampAttribute_sort(*args, **kwargs):
pass
def MFnMesh_intersectFaceAtUV(*args, **kwargs):
pass
def MItMeshPolygon_count(*args, **kwargs):
pass
def delete_MFnPointLight(*args, **kwargs):
pass
def MFloatVector___add__(*args, **kwargs):
pass
def MFnComponentListData_className(*args, **kwargs):
pass
def MFloatPoint_assign(*args, **kwargs):
pass
def MUint64Array_get(*args, **kwargs):
pass
def MPlugArray_remove(*args, **kwargs):
pass
def MGlobal_executeCommand(*args, **kwargs):
pass
def new_MFnData(*args, **kwargs):
pass
def MFnMesh_createUVSetDataMeshWithName(*args, **kwargs):
pass
def MFloatMatrix___add__(*args, **kwargs):
pass
def MFnAnisotropyShader_tangentVCamera(*args, **kwargs):
pass
def MDagPath_inclusiveMatrixInverse(*args, **kwargs):
pass
def MQuaternion_w_set(*args, **kwargs):
pass
def MItMeshFaceVertex_getBinormal(*args, **kwargs):
pass
def MFnPluginData_typeId(*args, **kwargs):
pass
def delete_MFnComponent(*args, **kwargs):
pass
def MFloatPointArray_setLength(*args, **kwargs):
pass
def delete_MConditionMessage(*args, **kwargs):
pass
def MTypeId___eq__(*args, **kwargs):
pass
def MColorArray_setLength(*args, **kwargs):
pass
def MGlobal_setObjectSelectionMask(*args, **kwargs):
pass
def MFnMesh_getFaceVertexBinormal(*args, **kwargs):
pass
def MFnAmbientLight_className(*args, **kwargs):
pass
def MDagPath_getAllPathsTo(*args, **kwargs):
pass
def MQuaternion___ne__(*args, **kwargs):
pass
def MItMeshEdge_className(*args, **kwargs):
pass
def MFnSubd_vertexBaseMeshAdd(*args, **kwargs):
pass
def MFnPhongEShader_highlightSize(*args, **kwargs):
pass
def MFnCamera_centerOfInterest(*args, **kwargs):
pass
def MFloatMatrix___eq__(*args, **kwargs):
pass
def delete_MTrimBoundaryArray(*args, **kwargs):
pass
def MCallbackIdArray_set(*args, **kwargs):
pass
def MObjectArray_remove(*args, **kwargs):
pass
def MGlobal_getLiveList(*args, **kwargs):
pass
def MFnMesh_setVertexNormal(*args, **kwargs):
pass
def MFnAttribute_swigregister(*args, **kwargs):
pass
def MFnLight_centerOfIllumination(*args, **kwargs):
pass
def MUint64Array___repr__(*args, **kwargs):
pass
def MDAGDrawOverrideInfo_fOverrideEnabled_get(*args, **kwargs):
pass
def MQuaternion_assign(*args, **kwargs):
pass
def MItMeshEdge_isSmooth(*args, **kwargs):
pass
def MFnNurbsSurface_swigregister(*args, **kwargs):
pass
def MFnCamera_setNearFarClippingPlanes(*args, **kwargs):
pass
def MFloatArray___iadd__(*args, **kwargs):
pass
def MTransformationMatrix_scalePivotTranslation(*args, **kwargs):
pass
def MFloatMatrix___mul__(*args, **kwargs):
pass
def MCacheFormatDescription_getTimePerFrame(*args, **kwargs):
pass
def MFnVolumeLight_setConeEndRadius(*args, **kwargs):
pass
def MFnMesh_getRawPoints(*args, **kwargs):
pass
def MFnContainerNode_clear(*args, **kwargs):
pass
def MDagPathArray___getitem__(*args, **kwargs):
pass
def MScriptUtil_setFloat4ArrayItem(*args, **kwargs):
pass
def boolPtr_assign(*args, **kwargs):
pass
def MItInstancer_path(*args, **kwargs):
pass
def MContainerMessage_className(*args, **kwargs):
pass
def MFnNurbsSurface_numPatchesInV(*args, **kwargs):
pass
def MFnCamera_setDisplayFilmGate(*args, **kwargs):
pass
def MFloatArray_insert(*args, **kwargs):
pass
def MTransformationMatrix_rotateTo(*args, **kwargs):
pass
def MBoundingBox_transformUsing(*args, **kwargs):
pass
def MNodeMessage_addNodeDirtyCallback(*args, **kwargs):
pass
def MFnVectorArrayData_copyTo(*args, **kwargs):
pass
def MFnMesh_anyIntersection(*args, **kwargs):
pass
def MItGeometry_reset(*args, **kwargs):
pass
def MFnTransform_setLimit(*args, **kwargs):
pass
def MDGModifier_setMetadata(*args, **kwargs):
pass
def MScriptUtil_getInt2ArrayItem(*args, **kwargs):
pass
def shortPtr_swigregister(*args, **kwargs):
pass
def MItGeometry_currentItem(*args, **kwargs):
pass
def MFnNurbsSurface_intersect(*args, **kwargs):
pass
def MFnCamera_setVerticalRollPivot(*args, **kwargs):
pass
def MFileObject_resolveMethod(*args, **kwargs):
pass
def MTimerMessage_sleepCallback(*args, **kwargs):
pass
def MAttributePatternArray_assign(*args, **kwargs):
pass
def MFnMesh_getPoints(*args, **kwargs):
pass
def MNodeClass_classification(*args, **kwargs):
pass
def MFnUnitAttribute_hasSoftMax(*args, **kwargs):
pass
def MFnNurbsCurve_findLengthFromParam(*args, **kwargs):
pass
def MFnMesh_numFaceVertices(*args, **kwargs):
pass
def MFnTransform_setRotatePivot(*args, **kwargs):
pass
def MDataHandle_asGenericInt64(*args, **kwargs):
pass
def MDGModifier_removeExtensionAttributeIfUnset(*args, **kwargs):
pass
def MScriptUtil_setUshortArray(*args, **kwargs):
pass
def charPtr_value(*args, **kwargs):
pass
def MIteratorType_setFilterList(*args, **kwargs):
pass
def MFnNurbsSurface_knotInU(*args, **kwargs):
pass
def MFloatMatrix_adjoint(*args, **kwargs):
pass
def MFnCamera_setHorizontalPan(*args, **kwargs):
pass
def MFileObject_setRawURI(*args, **kwargs):
pass
def MTime___itruediv__(*args, **kwargs):
pass
def new_MAttributePattern(*args, **kwargs):
pass
def MNamespace_removeNamespace(*args, **kwargs):
pass
def MFnUint64SingleIndexedComponent_addElement(*args, **kwargs):
pass
def MFnMesh_addHoles(*args, **kwargs):
pass
def MFnTransform_transformation(*args, **kwargs):
pass
def new_MCurveAttribute(*args, **kwargs):
pass
def MDataHandle_set2Int(*args, **kwargs):
pass
def MMeshSmoothOptions_openSubdivCreaseMethod(*args, **kwargs):
pass
def delete_MFnMatrixAttribute(*args, **kwargs):
pass
def MItEdits_next(*args, **kwargs):
pass
def MFnNurbsSurface_isKnotV(*args, **kwargs):
pass
def MFnCamera_verticalFilmOffset(*args, **kwargs):
pass
def MFileIO_unloadReferenceByNode(*args, **kwargs):
pass
def MTime_asUnits(*args, **kwargs):
pass
def MAttributeSpecArray_copy(*args, **kwargs):
pass
def MMatrixArray_swigregister(*args, **kwargs):
pass
def MItGeometry_swigregister(*args, **kwargs):
pass
def MFnTypedAttribute_setDefault(*args, **kwargs):
pass
def MFnMatrixData_set(*args, **kwargs):
pass
def MDataHandle_geometryTransformMatrix(*args, **kwargs):
pass
def MMeshSmoothOptions_setSmoothUVs(*args, **kwargs):
pass
def MScriptUtil_asShort2Ptr(*args, **kwargs):
pass
def MItDependencyGraph_getNodePath(*args, **kwargs):
pass
def new_MFnNurbsSurface(*args, **kwargs):
pass
def MFnMesh_getPolygonTriangleVertices(*args, **kwargs):
pass
def MFnCamera_getPortFieldOfView(*args, **kwargs):
pass
def MFnCamera_getViewingFrustum(*args, **kwargs):
pass
def MFileIO_beforeImportFilename(*args, **kwargs):
pass
def MTimeArray_length(*args, **kwargs):
pass
def MAttributeIndex_setLower(*args, **kwargs):
pass
def MMatrix_className(*args, **kwargs):
pass
def MFnTripleIndexedComponent_className(*args, **kwargs):
pass
def MFnMatrixArrayData_create(*args, **kwargs):
pass
def new_MFnAssembly(*args, **kwargs):
pass
def MDataHandle_asLong3(*args, **kwargs):
pass
def MFnLambertShader_setRefractiveIndex(*args, **kwargs):
pass
def MMeshIntersector_getClosestPoint(*args, **kwargs):
pass
def MScriptUtil_asShort(*args, **kwargs):
    pass
| |
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from core.models import *
from datetime import date
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
from django.forms import modelform_factory
from core.mocfunctions import *
# my_organizations returns the list of organizations that this user
# is the admin for -- this is normally one, but could be several
def my_organizations(request, id=None):
if id:
try:
return Organization.objects.get(pk=id, child_list__relationship__id=RELATIONSHIP_ID["platformu_admin"], child_list__record_parent=request.user.people)
except:
unauthorized_access(request)
else:
        organizations = Organization.objects.filter(child_list__relationship__id=RELATIONSHIP_ID["platformu_admin"], child_list__record_parent=request.user.people)
        if organizations:
            return organizations
        else:
            # Return None so the caller can redirect to the page where the
            # user registers a new organization.
            return None
return redirect("platformu:create_my_organization")
# this makes sure that if I open a record of an organization, that
# my own organization indeed manages this record, which is done by checking
# the tag associated with this organization
def get_entity_record(request, my_organization, entity):
try:
return Organization.objects_unfiltered.get(
pk = entity,
tags__parent_tag_id = TAG_ID["platformu_segments"],
tags__belongs_to = my_organization,
)
except:
unauthorized_access(request)
def index(request):
context = {
"show_project_design": True,
"webpage": Webpage.objects.get(pk=31595),
}
return render(request, "metabolism_manager/index.html", context)
@login_required
def admin(request):
organizations = my_organizations(request)
if not organizations:
return redirect("platformu:create_my_organization")
else:
id = organizations[0].id
return redirect(reverse("platformu:admin_clusters", args=[id]))
@login_required
def create_my_organization(request):
organizations = my_organizations(request)
if request.method == "POST":
organization = Organization.objects.create(name=request.POST["name"])
RecordRelationship.objects.create(
record_parent = request.user.people,
record_child = organization,
relationship_id = 1, # Make this person a PlatformU admin for this organization
)
RecordRelationship.objects.create(
record_parent = organization,
record_child_id = PROJECT_ID["platformu"],
relationship_id = 27, # Make this organization be signed up for PlatformU
)
messages.success(request, "Your organisation was created.")
return redirect("platformu:admin")
context = {
"organizations": organizations,
"show_project_design": True,
"title": "Create new organisation",
}
return render(request, "metabolism_manager/admin/my_organization.html", context)
@login_required
def clusters(request, organization):
my_organization = my_organizations(request, organization)
if request.method == "POST":
Tag.objects.create(
name = request.POST["name"],
parent_tag = Tag.objects.get(pk=TAG_ID["platformu_segments"]),
belongs_to = my_organization,
)
context = {
"page": "organisations",
"info": my_organization,
"tags": Tag.objects.filter(belongs_to=organization, parent_tag__id=TAG_ID["platformu_segments"]).order_by("id"),
"my_organization": my_organization,
}
return render(request, "metabolism_manager/admin/clusters.html", context)
@login_required
def admin_dashboard(request, organization=None):
types = None
    data = gps = material_list = current = latest = None
min_values = {}
if organization:
my_organization = my_organizations(request, organization)
else:
my_organization = my_organizations(request)
if my_organization:
my_organization = my_organization[0]
else:
return redirect("platformu:create_my_organization")
organization_list = Organization.objects_include_private.filter(
tags__parent_tag_id = TAG_ID["platformu_segments"],
tags__belongs_to = my_organization,
)
if not organization_list:
messages.error(request, "Please enter data first.")
else:
types = {
"Resources": MaterialDemand.objects.filter(owner__in=organization_list).exclude(material_type__parent_id__in=[31621,31620]),
"Space": MaterialDemand.objects.filter(owner__in=organization_list, material_type__parent_id=31621),
"Technology": MaterialDemand.objects.filter(owner__in=organization_list, material_type__parent_id=31620),
"Staff": MaterialDemand.objects.filter(owner__in=organization_list, material_type__parent_id=31620),
}
gps = organization_list[0].meta_data
if not "lat" in gps:
messages.error(request, "Please ensure that you enter the address/GPS details first.")
data = MaterialDemand.objects.filter(owner__in=organization_list)
current = data.filter(start_date__lte=date.today(), end_date__gte=date.today())
latest = data.order_by('-id')[:10]
material_list = MaterialDemand.objects.filter(owner__in=organization_list).values("material_type__name", "material_type__parent__name").distinct().order_by("material_type__name")
# We need to make each bubble relative to the smallest value in that group
# We can improve efficiency... starting with a single query to obtain only largest values
# But for now efficiency is not that big a deal
# Message from Guus: we're not using bubbles anymore, so this is no longer necessary
# Keeping it just in case
# for each in data:
# material = each.material_type
# if each.unit.multiplication_factor:
# # We always need to convert to a standard unit
# multiplied = each.unit.multiplication_factor * each.absolute_quantity()
# if material.name in min_values:
# current = min_values[material.name]
# min_values[material.name] = min([multiplied, current])
# else:
# min_values[material.name] = multiplied
context = {
"page": "dashboard",
"my_organization": my_organization,
"data": data,
"current": current,
"latest": latest,
"today": date.today(),
"material_list": material_list,
"gps": gps,
"min_values": min_values,
"load_datatables": True,
"load_leaflet": True,
"types": types,
}
return render(request, "metabolism_manager/admin/dashboard.html", context)
@login_required
def admin_map(request, organization=None):
data = gps = material_list = None
min_values = {}
if organization:
my_organization = my_organizations(request, organization)
else:
my_organization = my_organizations(request)
if my_organization:
my_organization = my_organization[0]
else:
return redirect("platformu:create_my_organization")
organization_list = Organization.objects_include_private.filter(
tags__parent_tag_id = TAG_ID["platformu_segments"],
tags__belongs_to = my_organization,
)
if not organization_list:
messages.error(request, "Please enter data first.")
else:
gps = organization_list[0].meta_data
if not "lat" in gps:
messages.error(request, "Please ensure that you enter the address/GPS details first.")
data = MaterialDemand.objects.filter(owner__in=organization_list)
material_list = MaterialDemand.objects.filter(owner__in=organization_list).values("material_type__name", "material_type__parent__name").distinct().order_by("material_type__name")
# We need to make each bubble relative to the smallest value in that group
# We can improve efficiency... starting with a single query to obtain only largest values
# But for now efficiency is not that big a deal
for each in data:
material = each.material_type
if each.unit.multiplication_factor:
# We always need to convert to a standard unit
multiplied = each.unit.multiplication_factor * each.absolute_quantity()
if material.name in min_values:
current = min_values[material.name]
min_values[material.name] = min([multiplied, current])
else:
min_values[material.name] = multiplied
context = {
"page": "map",
"my_organization": my_organization,
"data": data,
"material_list": material_list,
"gps": gps,
"min_values": min_values,
}
return render(request, "metabolism_manager/admin/map.html", context)
@login_required
def admin_data(request, organization=None):
types = None
if organization:
my_organization = my_organizations(request, organization)
else:
my_organization = my_organizations(request)
if my_organization:
my_organization = my_organization[0]
else:
return redirect("platformu:create_my_organization")
organization_list = Organization.objects_include_private.filter(
tags__parent_tag_id = TAG_ID["platformu_segments"],
tags__belongs_to = my_organization,
)
if not organization_list:
messages.error(request, "Please enter data first.")
else:
types = {
"Resources": MaterialDemand.objects.filter(owner__in=organization_list).exclude(material_type__parent_id__in=[31621,31620]),
"Space": MaterialDemand.objects.filter(owner__in=organization_list, material_type__parent_id=31621),
"Technology": MaterialDemand.objects.filter(owner__in=organization_list, material_type__parent_id=31620),
"Staff": MaterialDemand.objects.filter(owner__in=organization_list, material_type__parent_id=31620),
}
context = {
"page": "full_overview",
"my_organization": my_organization,
"load_datatables": True,
"types": types,
}
return render(request, "metabolism_manager/admin/data.html", context)
@login_required
def admin_datapoint(request, id):
data = MaterialDemand.objects.get(pk=id)
my_organization = my_organizations(request)[0]
# This is how we check that this user actually has access to this data point
info = get_entity_record(request, my_organization, data.owner.id)
context = {
"my_organization": my_organization,
"info": info,
"data": data,
"load_lightbox": True,
}
return render(request, "metabolism_manager/admin/datapoint.html", context)
@login_required
def admin_entity(request, organization, id):
my_organization = my_organizations(request, organization)
context = {
"page": "entity",
"my_organization": my_organization,
"info": get_entity_record(request, my_organization, id),
}
return render(request, "metabolism_manager/admin/entity.html", context)
@login_required
def admin_entity_form(request, organization, id=None):
my_organization = my_organizations(request, organization)
organization_list = Organization.objects_include_private.filter(
tags__parent_tag_id = TAG_ID["platformu_segments"],
tags__belongs_to = my_organization,
)
edit = False
if id:
info = get_entity_record(request, my_organization, id)
edit = True
else:
info = None
if request.method == "POST":
if not edit:
info = Organization()
info.name = request.POST["name"]
info.description = request.POST["description"]
info.url = request.POST["url"]
info.email = request.POST["email"]
if "status" in request.POST:
info.is_deleted = False
else:
info.is_deleted = True
if "image" in request.FILES:
info.image = request.FILES["image"]
info.meta_data = {
"address": request.POST.get("address"),
"employees": request.POST.get("employees"),
"lat": request.POST.get("lat"),
"lng": request.POST.get("lng"),
"sector": request.POST.get("sector"),
}
info.save()
info.sectors.clear()
if "sector" in request.POST:
info.sectors.add(Sector.objects.get(pk=request.POST["sector"]))
if "tag" in request.GET:
tag = Tag.objects.get(pk=request.GET["tag"])
info.tags.add(tag)
messages.success(request, "The information was saved.")
return redirect(reverse("platformu:admin_entity", args=[my_organization.id, info.id]))
context = {
"page": "entity_form",
"my_organization": my_organization,
"organization_list": organization_list,
"info": info,
"sectors": Sector.objects.all(),
"geoapify_api": settings.GEOAPIFY_API,
"load_select2": True,
}
return render(request, "metabolism_manager/admin/entity.form.html", context)
@login_required
def admin_entity_users(request, organization, id=None):
my_organization = my_organizations(request, organization)
info = get_entity_record(request, my_organization, id)
context = {
"page": "entity_users",
"my_organization": my_organization,
"info": info,
}
return render(request, "metabolism_manager/admin/entity.users.html", context)
@login_required
def admin_entity_materials(request, organization, id, slug=None):
my_organization = my_organizations(request, organization)
info = get_entity_record(request, my_organization, id)
main_groups = materials = None
if slug == "resources":
main_groups = Material.objects.filter(parent__isnull=True, catalog_id=31594).exclude(pk__in=[31621,31620])
materials = Material.objects.filter(parent__in=main_groups)
elif slug == "technology":
main_groups = None
materials = Material.objects.filter(parent_id=31620)
elif slug == "space":
main_groups = None
materials = Material.objects.filter(parent_id=31621)
elif slug == "staff":
main_groups = None
materials = Material.objects.filter(parent_id=31621)
context = {
"my_organization": my_organization,
"info": info,
"main_groups": main_groups,
"materials": materials,
"slug": slug,
"page": "entity_" + slug,
"data": MaterialDemand.objects.filter(owner=info, material_type__in=materials),
}
return render(request, "metabolism_manager/admin/entity.materials.html", context)
@login_required
def admin_entity_material(request, organization, id, slug, material=None, edit=None, type=None):
my_organization = my_organizations(request, organization)
info = get_entity_record(request, my_organization, id)
units = Unit.objects.all()
add_name_field = False
demand = None
if edit:
demand = get_object_or_404(MaterialDemand, pk=edit, owner=info)
material = demand.material_type.id
type = demand.type()
if material:
material = Material.objects.get(pk=material)
if material.measurement_type:
units = units.filter(type=material.measurement_type)
material_name = material.name
if material_name.lower() == "other":
add_name_field = True
fields = ["start_date", "end_date", "description", "image"]
if slug == "technology" or add_name_field:
fields = ["name"] + fields
ModelForm = modelform_factory(MaterialDemand, fields=fields)
if edit:
form = ModelForm(request.POST or None, request.FILES or None, instance=demand)
else:
form = ModelForm(request.POST or None, request.FILES or None)
if request.method == "POST":
if "delete" in request.POST:
demand.delete()
messages.success(request, "Record was deleted")
return redirect(request.GET.get("prev"))
if slug == "technology":
quantity = 1
unit_id = 15
else:
quantity = float(request.POST.get("quantity"))
unit_id = request.POST.get("unit")
if form.is_valid():
demand = form.save(commit=False)
demand.unit_id = unit_id
demand.quantity = quantity*-1 if type == "supply" else quantity
demand.material_type = material
demand.owner = info
demand.save()
messages.success(request, "Information was saved.")
return redirect(request.GET.get("prev"))
else:
messages.error(request, "We could not save your form, please fill out all fields")
context = {
"page": "entity_" + slug,
"my_organization": my_organization,
"info": info,
"form": form,
"material": material,
| |
# -*- coding: utf-8 -*-
import operator
from functools import reduce
from django.conf import settings
from django.contrib.admin import SimpleListFilter
from django.contrib.admin.views.main import ChangeList, ORDER_VAR
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db.models import Prefetch, Q
from django.utils.translation import ugettext_lazy as _
from ralph.admin import filters, RalphAdmin, RalphTabularInline, register
from ralph.admin.filters import (
BaseObjectHostnameFilter,
ChoicesListFilter,
IPFilter,
LiquidatedStatusFilter,
MacAddressFilter,
RelatedAutocompleteFieldListFilter,
TagsListFilter,
TreeRelatedAutocompleteFilterWithDescendants,
VulnerabilitesByPatchDeadline
)
from ralph.admin.helpers import generate_html_link
from ralph.admin.m2m import RalphTabularM2MInline
from ralph.admin.mixins import BulkEditChangeListMixin
from ralph.admin.views.extra import RalphDetailViewAdmin
from ralph.admin.views.main import RalphChangeList
from ralph.admin.views.multiadd import MulitiAddAdminMixin
from ralph.assets.invoice_report import AssetInvoiceReportMixin
from ralph.assets.models.base import BaseObject
from ralph.assets.models.components import Ethernet
from ralph.assets.views import ComponentsAdminView
from ralph.attachments.admin import AttachmentsMixin
from ralph.configuration_management.views import (
SCMCheckInfo,
SCMStatusCheckInChangeListMixin
)
from ralph.data_center.forms import DataCenterAssetForm
from ralph.data_center.models.components import DiskShare, DiskShareMount
from ralph.data_center.models.hosts import DCHost
from ralph.data_center.models.physical import (
Accessory,
Connection,
DataCenter,
DataCenterAsset,
Rack,
RackAccessory,
ServerRoom
)
from ralph.data_center.models.virtual import (
BaseObjectCluster,
Cluster,
ClusterType,
Database,
VIP
)
from ralph.data_center.views import RelationsView
from ralph.data_importer import resources
from ralph.deployment.mixins import ActiveDeploymentMessageMixin
from ralph.lib.custom_fields.admin import CustomFieldValueAdminMixin
from ralph.lib.table import Table
from ralph.lib.transitions.admin import TransitionAdminMixin
from ralph.licences.models import BaseObjectLicence
from ralph.networks.forms import SimpleNetworkWithManagementIPForm
from ralph.networks.models.networks import Network
from ralph.networks.views import NetworkWithTerminatorsView
from ralph.operations.views import OperationViewReadOnlyForExisiting
from ralph.security.views import ScanStatusInChangeListMixin, SecurityInfo
from ralph.supports.models import BaseObjectsSupport
def generate_list_filter_with_common_fields(
prefix=None, postfix=None
):
result = []
    if isinstance(prefix, list):
result.extend(prefix)
result.extend(
[
'service_env',
'configuration_path__path',
(
'configuration_path__module',
TreeRelatedAutocompleteFilterWithDescendants
),
MacAddressFilter,
IPFilter,
(
'securityscan__vulnerabilities__patch_deadline',
VulnerabilitesByPatchDeadline
),
(
'securityscan__vulnerabilities',
filters.RelatedAutocompleteFieldListFilter
),
'securityscan__is_patched',
]
)
    if isinstance(postfix, list):
result.extend(postfix)
return result
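# A minimal usage sketch (the prefix/postfix values below are illustrative):
#
#   list_filter = generate_list_filter_with_common_fields(
#       prefix=['hostname'],
#       postfix=['status', 'barcode', TagsListFilter],
#   )
#
# which yields the common service/configuration/security filters wrapped by
# the given extra fields, as done for DataCenterAssetAdmin further below.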
class DCHostTypeListFilter(ChoicesListFilter):
def __init__(self, *args, **kwargs):
from ralph.data_center.models import Cluster, DataCenterAsset
from ralph.virtual.models import CloudHost, VirtualServer
models = [Cluster, DataCenterAsset, CloudHost, VirtualServer]
self.choices_list = [
(
ContentType.objects.get_for_model(model).pk,
model._meta.verbose_name
)
for model in models
]
super().__init__(*args, **kwargs)
class DCHostHostnameFilter(SimpleListFilter):
title = _('Hostname')
parameter_name = 'hostname'
template = 'admin/filters/text_filter.html'
def queryset(self, request, queryset):
if not self.value():
return queryset
fields = [
'asset__hostname',
'cloudhost__hostname',
'cluster__hostname',
'virtualserver__hostname',
'ethernet_set__ipaddress__hostname'
]
# TODO: simple if hostname would be in one model
queries = [
Q(**{'{}__icontains'.format(field): self.value().strip()})
for field in fields
]
return queryset.filter(reduce(operator.or_, queries)).distinct()
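    # For a search term like "web-01" this builds, roughly,
    #   Q(asset__hostname__icontains="web-01") | Q(cloudhost__hostname__icontains="web-01") | ...
    # across all hostname-bearing relations, with distinct() guarding against
    # duplicate rows introduced by the ethernet_set join.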
def lookups(self, request, model_admin):
return (
(1, _('Hostname')),
)
def choices(self, cl):
yield {
'selected': self.value(),
'parameter_name': self.parameter_name,
}
if settings.ENABLE_DNSAAS_INTEGRATION:
from ralph.dns.views import DNSView
class ClusterDNSView(DNSView):
pass
@register(Accessory)
class AccessoryAdmin(RalphAdmin):
search_fields = ['name']
class ClusterNetworkInline(RalphTabularInline):
form = SimpleNetworkWithManagementIPForm
model = Ethernet
exclude = ['model']
class ClusterLicencesView(RalphDetailViewAdmin):
icon = 'key'
name = 'cluster_licences'
label = _('Licences')
url_name = 'licences'
class ClusterLicenceInline(RalphTabularInline):
model = BaseObjectLicence
raw_id_fields = ('licence',)
extra = 1
inlines = [ClusterLicenceInline]
@register(ClusterType)
class ClusterTypeAdmin(RalphAdmin):
search_fields = ['name']
@register(Cluster)
class ClusterAdmin(CustomFieldValueAdminMixin, RalphAdmin):
search_fields = ['name', 'hostname', 'ethernet_set__ipaddress__hostname']
fieldsets = (
(_('Basic info'), {
'fields': (
'name', 'hostname', 'type', 'status', 'remarks', 'service_env',
'configuration_path',
'tags'
)
}),
)
raw_id_fields = ['service_env', 'configuration_path']
readonly_fields = ['get_masters_summary']
list_display = ['id', 'name', 'hostname', 'type']
list_select_related = ['type']
list_filter = [
'name', BaseObjectHostnameFilter, 'type', 'service_env',
'configuration_path', 'status'
]
change_views = [ClusterLicencesView]
if settings.ENABLE_DNSAAS_INTEGRATION:
change_views += [ClusterDNSView]
class ClusterBaseObjectInline(RalphTabularInline):
model = BaseObjectCluster
fk_name = 'cluster'
raw_id_fields = ('base_object',)
extra = 1
verbose_name = _('Base Object')
inlines = [ClusterBaseObjectInline, ClusterNetworkInline]
def get_fieldsets(self, request, obj=None):
"""
Attach master info fieldset only if show_master_summary option checked
for cluster type.
"""
fieldsets = super().get_fieldsets(request, obj)
if obj and obj.pk and obj.type.show_master_summary:
fieldsets += ((
_('Master Info'), {
'fields': (
'get_masters_summary',
)
}
),)
return fieldsets
def get_masters_summary(self, obj):
masters = obj.masters
if not masters:
return '-'
return Table(
masters,
getattr(masters[0], '_summary_fields', []),
transpose=True,
).render()
get_masters_summary.allow_tags = True
get_masters_summary.short_description = _('Master info')
@register(DataCenter)
class DataCenterAdmin(RalphAdmin):
search_fields = ['name']
class NetworkTerminatorReadOnlyInline(RalphTabularM2MInline):
model = Network
extra = 0
show_change_link = True
verbose_name_plural = _('Terminators of')
fields = [
'name', 'address',
]
def get_readonly_fields(self, request, obj=None):
return self.get_fields(request, obj)
def has_add_permission(self, request):
return False
class DataCenterAssetNetworkView(NetworkWithTerminatorsView):
pass
class DataCenterAssetSupport(RalphDetailViewAdmin):
icon = 'bookmark'
name = 'dc_asset_support'
label = _('Supports')
url_name = 'data_center_asset_support'
class DataCenterAssetSupportInline(RalphTabularInline):
model = BaseObjectsSupport
raw_id_fields = ('support',)
extra = 1
verbose_name = _('Support')
ordering = ['-support__date_to']
inlines = [DataCenterAssetSupportInline]
class DataCenterAssetLicence(RalphDetailViewAdmin):
icon = 'key'
name = 'dc_asset_licences'
label = _('Licences')
url_name = 'data_center_asset_licences'
class DataCenterAssetLicenceInline(RalphTabularInline):
model = BaseObjectLicence
raw_id_fields = ('licence',)
extra = 1
inlines = [DataCenterAssetLicenceInline]
class DataCenterAssetComponents(ComponentsAdminView):
pass
class DataCenterAssetOperation(OperationViewReadOnlyForExisiting):
name = 'dc_asset_operations'
url_name = 'data_center_asset_operations'
inlines = OperationViewReadOnlyForExisiting.admin_class.inlines
class DataCenterAssetSecurityInfo(SecurityInfo):
url_name = 'datacenter_asset_security_info'
class DataCenterAssetChangeList(RalphChangeList):
def get_ordering(self, request, queryset):
"""Adds extra ordering params for ordering by location."""
# NOTE(romcheg): slot_no is added by Django Admin automatically.
location_fields = [
'rack__server_room__data_center__name',
'rack__server_room__name',
'rack__name',
'position',
]
ordering = super(DataCenterAssetChangeList, self).get_ordering(
request, queryset
)
params = self.params
if ORDER_VAR in params:
order_params = params[ORDER_VAR].split('.')
for insert_index, p in enumerate(order_params):
try:
none, pfx, idx = p.rpartition('-')
if self.list_display[int(idx)] == 'show_location':
ordering[insert_index:insert_index] = [
'{}{}'.format(pfx, field)
for field in location_fields
]
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
return ordering
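    # In effect, sorting the changelist on the "show_location" column expands
    # that single ordering parameter into the four location_fields above
    # (data center, server room, rack, position), preserving the requested
    # ascending/descending prefix; slot_no is appended automatically by Django.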
class DataCenterAssetSCMInfo(SCMCheckInfo):
url_name = 'datacenterasset_scm_info'
class DataCenterAssetRelationsView(RelationsView):
url = 'datacenterasset_relations'
@register(DataCenterAsset)
class DataCenterAssetAdmin(
SCMStatusCheckInChangeListMixin,
ScanStatusInChangeListMixin,
ActiveDeploymentMessageMixin,
MulitiAddAdminMixin,
TransitionAdminMixin,
BulkEditChangeListMixin,
AttachmentsMixin,
AssetInvoiceReportMixin,
CustomFieldValueAdminMixin,
RalphAdmin,
):
"""Data Center Asset admin class."""
add_form_template = 'data_center/datacenterasset/add_form.html'
actions = ['bulk_edit_action']
change_views = [
DataCenterAssetComponents,
DataCenterAssetNetworkView,
DataCenterAssetSecurityInfo,
DataCenterAssetSCMInfo,
DataCenterAssetRelationsView,
DataCenterAssetLicence,
DataCenterAssetSupport,
DataCenterAssetOperation,
]
form = DataCenterAssetForm
if settings.ENABLE_DNSAAS_INTEGRATION:
change_views += [DNSView]
show_transition_history = True
resource_class = resources.DataCenterAssetResource
list_display = [
'hostname',
'status',
'barcode',
'model',
'sn',
'invoice_date',
'invoice_no',
'show_location',
'service_env',
'configuration_path',
'scan_status',
'scm_status_check'
]
multiadd_summary_fields = list_display + ['rack']
one_of_mulitvalue_required = ['sn', 'barcode']
bulk_edit_list = [
'hostname', 'status', 'barcode', 'model', 'sn', 'invoice_date',
'invoice_no', 'rack', 'orientation', 'position', 'slot_no', 'price',
'provider', 'service_env', 'configuration_path', 'tags', 'start_usage'
]
bulk_edit_no_fillable = ['barcode', 'sn']
search_fields = [
'barcode', 'sn', 'hostname', 'invoice_no', 'order_no',
'ethernet_set__ipaddress__address', 'ethernet_set__ipaddress__hostname'
]
list_filter_prefix = ['hostname']
list_filter_postfix = [
'invoice_no', 'invoice_date', 'status', 'barcode', 'sn',
'order_no', 'model__name',
('model__category', RelatedAutocompleteFieldListFilter),
'depreciation_end_date', 'force_depreciation', 'remarks',
'budget_info', 'rack', 'rack__server_room',
'rack__server_room__data_center', 'position', 'property_of',
LiquidatedStatusFilter, TagsListFilter,
'fibrechannelcard_set__wwn'
]
list_filter = generate_list_filter_with_common_fields(
list_filter_prefix,
list_filter_postfix
)
date_hierarchy = 'created'
list_select_related = [
'model',
'model__manufacturer',
'model__category',
'rack',
'rack__server_room',
'rack__server_room__data_center',
'service_env',
'service_env__service',
'service_env__environment',
'configuration_path',
]
raw_id_fields = [
'model', 'rack', 'service_env', 'parent', 'budget_info',
'configuration_path',
]
raw_id_override_parent = {'parent': DataCenterAsset}
_invoice_report_name = 'invoice-data-center-asset'
readonly_fields = ['get_created_date', 'go_to_visualization']
fieldsets = (
(_('Basic info'), {
'fields': (
'hostname', 'model', 'status', 'barcode', 'sn', 'niw',
'required_support', 'remarks', 'tags', 'property_of',
'firmware_version', 'bios_version',
)
}),
(_('Location Info'), {
'fields': (
'rack', 'position', 'orientation', 'slot_no', 'parent',
'management_ip', 'management_hostname', 'go_to_visualization'
)
}),
(_('Usage info'), {
'fields': (
'service_env', 'configuration_path', 'production_year',
'production_use_date',
)
}),
(_('Financial & Order Info'), {
'fields': (
'order_no', 'invoice_date', 'invoice_no', 'task_url', 'price',
'depreciation_rate', 'depreciation_end_date',
'force_depreciation', 'source', 'provider', 'delivery_date',
'budget_info', 'start_usage', 'get_created_date',
)
}),
)
def get_multiadd_fields(self, obj=None):
multiadd_fields = [
{'field': 'sn', 'allow_duplicates': False},
{'field': 'barcode', 'allow_duplicates': False},
]
return getattr(
settings, 'MULTIADD_DATA_CENTER_ASSET_FIELDS', None
) or multiadd_fields
def go_to_visualization(self, obj):
if not obj.rack:
return '—'
url = '{}#/sr/{}/rack/{}'.format(
reverse('dc_view'),
obj.rack.server_room_id,
obj.rack.id,
)
label = ' / '.join(obj.get_location())
return generate_html_link(url, label=label, params={})
go_to_visualization.short_description = _('Visualization')
go_to_visualization.allow_tags = True
def show_location(self, obj):
return obj.location
show_location.short_description = _('Location')
show_location.allow_tags = True
# NOTE(romcheg): Django Admin can only order custom fields by one field.
# The rest of the ordering is configured in
# DataCenterAssetChangeList.get_ordering()
show_location.admin_order_field = 'slot_no'
def get_created_date(self, obj):
"""
Return created date for asset (since created is blacklisted by
permissions, it cannot be displayed directly, because only superuser
will see it)
"""
return obj.created or '-'
get_created_date.short_description = _('Created at')
def get_changelist(self, request, **kwargs):
return DataCenterAssetChangeList
@register(ServerRoom)
class ServerRoomAdmin(RalphAdmin):
list_select_related = ['data_center']
search_fields = ['name', 'data_center__name']
resource_class = resources.ServerRoomResource
list_display = ['name', 'data_center']
class RackAccessoryInline(RalphTabularInline):
model = RackAccessory
@register(Rack)
class RackAdmin(RalphAdmin):
exclude = ['accessories']
list_display = [
'name',
'server_room_name',
'data_center_name',
'reverse_ordering',
]
list_filter = ['server_room__data_center'] # TODO use fk field in filter
list_select_related = ['server_room', 'server_room__data_center']
search_fields = ['name']
inlines = [RackAccessoryInline]
resource_class = resources.RackResource
def server_room_name(self, obj):
return obj.server_room.name if obj.server_room else ''
server_room_name.short_description = _('Server room')
server_room_name.admin_order_field = 'server_room__name'
def data_center_name(self, obj):
return obj.server_room.data_center.name if obj.server_room else ''
data_center_name.short_description = _('Data Center')
data_center_name.admin_order_field = 'server_room__data_center__name'
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "server_room":
kwargs["queryset"] = ServerRoom.objects.select_related(
'data_center',
)
return super(RackAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs
)
@register(RackAccessory)
class RackAccessoryAdmin(RalphAdmin):
list_select_related = ['rack', 'accessory']
search_fields = ['accessory__name', 'rack__name']
raw_id_fields = ['rack']
list_display = ['__str__', 'position']
resource_class = resources.RackAccessoryResource
@register(Database)
class DatabaseAdmin(RalphAdmin):
pass
@register(VIP)
class VIPAdmin(RalphAdmin):
search_fields = ['name', 'ip__address']
list_display = ['name', 'ip', 'port', 'protocol', 'service_env']
list_filter = ['ip', 'port', 'protocol', 'service_env', 'parent']
list_select_related = [
'ip', 'service_env__service', 'service_env__environment'
]
raw_id_fields = ['ip', 'service_env', 'parent', 'configuration_path']
raw_id_override_parent = {'parent': Cluster}
fields = (
'name', 'ip', 'port', 'protocol', 'service_env', 'parent', 'remarks',
'tags'
)
@register(Connection)
class ConnectionAdmin(RalphAdmin):
resource_class = resources.ConnectionResource
@register(DiskShare)
class DiskShareAdmin(RalphAdmin):
pass
@register(DiskShareMount)
class DiskShareMountAdmin(RalphAdmin):
pass
class DCHostChangeList(ChangeList):
    def url_for_result(self,
| |
#!/usr/bin/env python
# software from PDBe: Protein Data Bank in Europe; https://pdbe.org
#
# Copyright 2018 EMBL - European Bioinformatics Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Structure writing module. Presently the following formats are supported:
SDF, CIF, PDB, JSON, XYZ, XML, CML.
Raises:
    CCDUtilsError: If the requested format is not supported or an unrecoverable
        error occurs.
"""
import copy
import json
import math
import xml.etree.ElementTree as ET
from collections import OrderedDict
from typing import List
from xml.dom import minidom
import mmCif.mmcifIO as mmcif
import rdkit
import pdbeccdutils
from pdbeccdutils.core.component import Component
from pdbeccdutils.core.exceptions import CCDUtilsError
from pdbeccdutils.core.models import ConformerType
def write_molecule(path, component: Component, remove_hs: bool = True, alt_names: bool = False,
conf_type: ConformerType = ConformerType.Ideal):
"""Export molecule in a specified format. Presently supported formats
are: PDB CCD CIF (*.cif); Mol file (*.sdf); Chemical Markup language
(*.cml); PDB file (*.pdb); XYZ file (*.xyz); XML (*.xml).
ConformerType.AllConformers is presently supported only for PDB.
Args:
path (str): Path to the file. Extension determines format to be
used.
component (Component): Component to be exported
remove_hs (bool, optional): Defaults to True. Whether or not
hydrogens should be removed.
alt_names (bool, optional): Defaults to False. Whether or not
alternate names should be exported.
conf_type (ConformerType, optional):
Defaults to ConformerType.Ideal. Conformer type to be
exported.
Raises:
CCDUtilsError: For unsupported format
"""
extension = path.split('.')[-1].lower()
str_representation = ''
if extension in ('sdf', 'mol'):
str_representation = to_sdf_str(component, remove_hs, conf_type)
elif extension == 'pdb':
str_representation = to_pdb_str(component, remove_hs, alt_names, conf_type)
elif extension in ('mmcif', 'cif'):
to_pdb_ccd_cif_file(path, component, remove_hs)
return
elif extension == 'cml':
str_representation = to_cml_str(component, remove_hs, conf_type)
elif extension == 'xml':
str_representation = to_xml_str(component, remove_hs, conf_type)
elif extension == 'xyz':
str_representation = to_xyz_str(component, remove_hs, conf_type)
elif extension == 'json':
str_representation = json.dumps(to_json_dict(component, remove_hs, conf_type), sort_keys=True, indent=4)
else:
raise CCDUtilsError('Unsupported file format: {}'.format(extension))
with open(path, 'w') as f:
f.write(str_representation)
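# A minimal usage sketch -- the file name is illustrative and the component is
# assumed to come from the standard pdbeccdutils CCD reader:
#
#   from pdbeccdutils.core import ccd_reader
#   component = ccd_reader.read_pdb_cif_file('ATP.cif').component
#   write_molecule('ATP.sdf', component, remove_hs=True,
#                  conf_type=ConformerType.Ideal)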
def to_pdb_str(component: Component, remove_hs: bool = True, alt_names: bool = False,
conf_type: ConformerType = ConformerType.Ideal):
"""Converts structure to the PDB format.
Args:
component (Component): Component to be exported.
remove_hs (bool, optional): Defaults to True.
alt_names (bool, optional): Defaults to False. Whether or not
alternate atom names should be exported.
conf_type (ConformerType, optional): Defaults to ConformerType.Ideal.
Returns:
str: String representation of the component in the PDB format.
"""
(mol_to_save, conf_id, conf_type) = _prepate_structure(component, remove_hs, conf_type)
info = rdkit.Chem.rdchem.AtomPDBResidueInfo()
info.SetResidueName(component.id)
info.SetTempFactor(20.0)
info.SetOccupancy(1.0)
info.SetChainId('A')
info.SetResidueNumber(1)
info.SetIsHeteroAtom(True)
for atom in mol_to_save.GetAtoms():
flag = _get_alt_atom_name(atom) if alt_names else _get_atom_name(atom)
atom_name = '{:<4}'.format(flag) # make sure it is 4 characters
info.SetName(atom_name)
atom.SetMonomerInfo(info)
pdb_title = 'HEADER {} coordinates'.format(conf_type.name)
pdb_title += ' for PDB-CCD {}\n'.format(component.id)
pdb_title += 'COMPND {}\n'.format(component.id)
pdb_title += 'AUTHOR pdbccdutils {}\n'.format(pdbeccdutils.__version__)
pdb_title += 'AUTHOR RDKit {}\n'.format(rdkit.__version__)
pdb_body = ''
try:
pdb_body = rdkit.Chem.MolToPDBBlock(mol_to_save, conf_id)
except Exception:
pdb_body = _to_pdb_str_fallback(mol_to_save, component.id, conf_id, conf_type.name)
pdb_string = pdb_title + pdb_body
return pdb_string
def to_sdf_str(component: Component, remove_hs: bool = True,
conf_type: ConformerType = ConformerType.Ideal):
"""Converts structure to the SDF format.
Args:
component (Component): Component to be exported.
remove_hs (bool, optional): Defaults to True.
conf_type (ConformerType, optional): Defaults to ConformerType.Ideal.
Raises:
CCDUtilsError: In case the structure could not be exported.
Returns:
str: String representation of the component in the SDF format
"""
(mol_to_save, conf_id, conf_type) = _prepate_structure(component, remove_hs, conf_type)
mol_block = []
mappings = {}
if conf_type == ConformerType.AllConformers:
conformers = [ConformerType.Model, ConformerType.Ideal, ConformerType.Computed]
else:
conformers = [conf_type]
try:
for conf in conformers:
try:
s = '{} - {} conformer'.format(component.id, conf.name)
s += rdkit.Chem.MolToMolBlock(mol_to_save, confId=component.conformers_mapping[conf])
s += '$$$$'
mol_block.append(s)
except ValueError as e:
if str(e) == 'Bad Conformer Id':
pass
else:
raise CCDUtilsError('Error writing SDF file - {}'.format(e))
except Exception:
mappings = {m.name: component.conformers_mapping[m] for m in conformers}
mol_block = _to_sdf_str_fallback(mol_to_save, component.id, mappings)
return "\n".join(mol_block)
def to_xyz_str(component, remove_hs=True, conf_type=ConformerType.Ideal):
"""Converts structure to the XYZ format. Does not yet support
ConformerType.AllConformers.
Args:
component (Component): Component to be exported.
remove_hs (bool, optional): Defaults to True.
conf_type (ConformerType, optional): Defaults to ConformerType.Ideal.
Returns:
str: String representation of the component in the XYZ format
"""
(mol_to_save, conf_id, conf_type) = _prepate_structure(component, remove_hs, conf_type)
conformer = mol_to_save.GetConformer(id=conf_id)
result = list()
result.append(str(mol_to_save.GetNumAtoms()))
result.append(component.id)
for atom in mol_to_save.GetAtoms():
coords = conformer.GetAtomPosition(atom.GetIdx())
result.append('{0:<4}{1: f} {2: f} {3: f}'.
format(atom.GetSymbol(), coords.x, coords.y, coords.z))
return '\n'.join(result)
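# Sketch of the XYZ layout produced above (atom count, component id, then one
# line per atom; coordinate values are illustrative only):
#
#   23
#   ATP
#   C    1.234000 -0.567000  0.000000
#   ...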
def to_xml_xml(component, remove_hs=True, conf_type=ConformerType.Ideal):
"""Converts structure to the XML format and returns its XML repr.
Args:
component (Component): Component to be exported.
remove_hs (bool, optional): Defaults to True.
conf_type (ConformerType, optional): Defaults to ConformerType.Ideal.
Returns:
xml.etree.ElementTree.Element: XML object
"""
root = ET.Element('chemComp')
id_e = ET.SubElement(root, 'id')
name_e = ET.SubElement(root, 'name')
formula_e = ET.SubElement(root, 'formula')
sys_name_e = ET.SubElement(root, 'systematicName')
s_smiles_e = ET.SubElement(root, 'stereoSmiles')
n_smiles_e = ET.SubElement(root, 'nonStereoSmiles')
inchi_e = ET.SubElement(root, 'inchi')
name_e.text = component.name
id_e.text = component.id
formula_e.text = component.formula
sys_name_e.text = next((x.value for x in component.descriptors
if x.type == 'SYSTEMATIC NAME' and x.program == 'ACDLabs'), '')
s_smiles_e.text = next((x.value for x in component.descriptors
if x.type == 'SMILES_CANONICAL' and x.program == 'CACTVS'), '')
n_smiles_e.text = next((x.value for x in component.descriptors
if x.type == 'SMILES' and x.program == 'CACTVS'), '')
inchi_e.text = component.inchi
return root
def to_xml_str(component: Component, remove_hs=True, conf_type=ConformerType.Ideal):
"""Converts structure to the XML format. Presently just molecule
metadata are serialized without any coordinates, which is in
accordance with the content of the PDBeChem area.
Args:
component (Component): Component to be exported.
remove_hs (bool, optional): Defaults to True.
conf_type (ConformerType, optional): Defaults to ConformerType.Ideal.
Returns:
str: String representation of the component in CML format.
"""
root = to_xml_xml(component, remove_hs, conf_type)
xml = ET.tostring(root, encoding='utf-8', method='xml')
pretty = minidom.parseString(xml)
return pretty.toprettyxml(indent=" ")
def to_pdb_ccd_cif_file(path, component: Component, remove_hs=True):
"""Converts structure to the PDB CIF format. Both model and ideal
coordinates are stored. In case ideal coordinates are missing, rdkit
attempts to generate 3D coordinates of the conformer.
Args:
path (str): Path to save cif file.
component (Component): Component to be exported.
remove_hs (bool, optional): Defaults to True.
"""
if not isinstance(component.ccd_cif_dict, dict):
component.ccd_cif_dict = _to_pdb_ccd_cif_dict(component)
cif_copy = copy.deepcopy(component.ccd_cif_dict)
_add_sw_info_cif(cif_copy)
_add_2d_depiction_cif(component, cif_copy)
_add_fragments_and_scaffolds_cif(component, cif_copy)
_add_rdkit_properties_cif(component, cif_copy)
_add_unichem_mapping_cif(component, cif_copy)
_add_rdkit_conformer_cif(component, cif_copy, remove_hs)
if remove_hs:
h_indices: List[int] = [i for i, x in enumerate(cif_copy['_chem_comp_atom']['type_symbol']) if x == "H"]
h_names: List[str] = [cif_copy['_chem_comp_atom']['atom_id'][i] for i in h_indices]
hb_indices = []
for key in ('atom_id_1', 'atom_id_2'):
indices = [i for i, k in enumerate(cif_copy['_chem_comp_bond'][key]) if k in h_names]
hb_indices += indices
hb_indices = list(set(hb_indices))
# scrap hydrogen atoms
for key in cif_copy['_chem_comp_atom']:
cif_copy['_chem_comp_atom'][key] = (
[k for i, k in enumerate(cif_copy['_chem_comp_atom'][key]) if i not in h_indices])
# scrap bonds to hydrogen atoms
for key in cif_copy['_chem_comp_bond']:
cif_copy['_chem_comp_bond'][key] = (
[k for i, k in enumerate(cif_copy['_chem_comp_bond'][key]) if i not in hb_indices])
cfd = mmcif.CifFileWriter(path)
cfd.write({component.id: cif_copy})
def to_cml_str(component: Component, remove_hs=True, conf_type=ConformerType.Ideal):
"""Converts structure to the EBI representation of the molecule in
CML format: http://cml.sourceforge.net/schema/cmlCore.xsd
Args:
component (Component): Component to be exported.
remove_hs (bool, optional): Defaults to True.
conf_type (ConformerType, optional): Defaults to ConformerType.Ideal.
Returns:
str: String representation of the component in CML format.
"""
(mol_to_save, conf_id, conf_type) = _prepate_structure(component, remove_hs, conf_type)
root = ET.Element('cml')
root.set('xsi:schemaLocation', 'http://cml.sourceforge.net/schema/cmlCore.xsd')
root.set('xmlns:xsi', 'http://www.w3.org/2001/XMLSchema-instance')
root.set('dictRef', 'ebiMolecule:ebiMoleculeDict.cml')
root.set('ebiMolecule', 'http://www.ebi.ac.uk/felics/molecule')
f_charge = sum([l.GetFormalCharge() for l in mol_to_save.GetAtoms()])
mol = ET.SubElement(root, 'molecule', {'id': component.id, 'formalCharge': str(f_charge)})
id_inchi = ET.SubElement(mol, 'identifier', {'dictRef': 'ebiMolecule:inchi'})
id_inchi.text = component.inchi
id_systematic = ET.SubElement(mol, 'identifier', {'dictRef': 'ebiMolecule:systematicName'})
id_systematic.text = component.name
id_formula1 = ET.SubElement(mol, 'formula', {'dictRef': 'ebiMolecule:stereoSmiles'})
id_formula2 = ET.SubElement(mol, 'formula', {'dictRef': 'ebiMolecule:nonStereoSmiles'})
id_formula1.text = next((x.value for x in component.descriptors
if x.type == 'SMILES_CANONICAL' and x.program == 'CACTVS'), '')
id_formula2.text = next((x.value for x in component.descriptors
if x.type == 'SMILES' and x.program == 'CACTVS'), '')
atom_array = ET.SubElement(mol, 'atomArray')
conformer = mol_to_save.GetConformer(id=conf_id)
for atom in mol_to_save.GetAtoms():
element = atom.GetSymbol()
a_name = _get_atom_name(atom)
coords = conformer.GetAtomPosition(atom.GetIdx())
a_entry = ET.SubElement(atom_array, 'atom', {'id': a_name, 'elementType': element})
a_entry.set('x3', str(coords.x))
        a_entry.set('y3', str(coords.y))
| |
from collections import OrderedDict
from gradualelixir.gtypes import (
AnyType,
AtomLiteralType,
AtomType,
BooleanType,
ElistType,
FloatType,
FunctionType,
IntegerType,
ListType,
MapKey,
MapType,
NumberType,
TupleType,
TypeEnv,
)
from gradualelixir.pattern import (
AtomLiteralPattern,
BasePatternMatchError,
ElistPattern,
FloatPattern,
IdentPattern,
IntegerPattern,
ListPattern,
ListPatternContext,
MapPattern,
MapPatternContext,
NestedPatternMatchError,
PatternErrorEnum,
PatternMatchError,
PatternMatchSuccess,
PinIdentPattern,
TuplePattern,
TuplePatternContext,
WildPattern,
pattern_match,
)
from gradualelixir.tests import TEST_ENV
from gradualelixir.utils import long_line
integer = "integer"
float = "float"
number = "number"
any = "any"
x = "x"
y = "y"
z = "z"
px = "^x"
py = "^y"
pz = "^z"
def assert_pattern_match_ok(
pattern, type, hijacked_pattern_env=None, env=None, expected_type=None, expected_pattern_env=None
):
if TEST_ENV.get("errors_only"):
return
env = env or {}
hijacked_pattern_env = TypeEnv(hijacked_pattern_env)
expected_pattern_env = TypeEnv(expected_pattern_env or hijacked_pattern_env.env)
ret = pattern_match(pattern, type, hijacked_pattern_env, env)
assert isinstance(ret, PatternMatchSuccess)
assert ret.refined_type == expected_type
assert ret.exported_env == expected_pattern_env
if TEST_ENV.get("display_results") or TEST_ENV.get("display_results_verbose"):
print(f"\n{long_line}\n\n{ret}")
def assert_pattern_match_error(pattern, type, hijacked_pattern_env=None, env=None, expected_context=None):
if TEST_ENV.get("success_only"):
return
env = TypeEnv(env)
hijacked_pattern_env = TypeEnv(hijacked_pattern_env)
ret = pattern_match(pattern, type, hijacked_pattern_env, env)
assert isinstance(ret, PatternMatchError)
check_context_path(ret, expected_context)
if TEST_ENV.get("display_results") or TEST_ENV.get("display_results_verbose"):
print(f"\n{long_line}\n\n{ret.message(padding='')}")
def check_context_path(error_data: PatternMatchError, context_path):
if isinstance(error_data, NestedPatternMatchError):
assert isinstance(context_path, tuple)
context_instance = context_path[0]
assert error_data.context == context_instance
check_context_path(error_data.bullet, context_path[1])
else:
assert isinstance(error_data, BasePatternMatchError)
assert error_data.kind is context_path
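# Helper: returns an OrderedDict whose keys are the given args, sorted and
# de-duplicated, each mapped to the empty tuple -- e.g.
# sett(2, 1, 1) == OrderedDict([(1, ()), (2, ())]).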
def sett(*args):
args = list(args)
args.sort()
aux = OrderedDict()
for k in args:
aux[k] = ()
return aux
def test_tp_lit():
assert_pattern_match_ok(
AtomLiteralPattern("true"),
AtomType(),
expected_type=AtomLiteralType("true"),
)
assert_pattern_match_error(
AtomLiteralPattern("true"), IntegerType(), expected_context=PatternErrorEnum.incompatible_type_for_literal
)
def test_tp_pin():
assert_pattern_match_ok(
PinIdentPattern("x"),
IntegerType(),
env={"x": IntegerType()},
expected_type=IntegerType(),
)
assert_pattern_match_ok(
PinIdentPattern("x"),
IntegerType(),
env={"x": NumberType()},
expected_type=IntegerType(),
)
assert_pattern_match_ok(
PinIdentPattern("x"),
MapType({MapKey(1): TupleType([])}),
env={"x": MapType({MapKey(2): TupleType([])})},
expected_type=MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
)
assert_pattern_match_error(
PinIdentPattern("x"),
IntegerType(),
env={"y": IntegerType()},
expected_context=PatternErrorEnum.pinned_identifier_not_found_in_environment,
)
assert_pattern_match_error(
PinIdentPattern("x"),
IntegerType(),
env={"x": FloatType()},
expected_context=PatternErrorEnum.incompatible_type_for_pinned_variable,
)
assert_pattern_match_error(
PinIdentPattern("x"),
IntegerType(),
env={"x": FloatType()},
expected_context=PatternErrorEnum.incompatible_type_for_pinned_variable,
)
assert_pattern_match_error(
PinIdentPattern("x"),
IntegerType(),
env={"y": IntegerType()},
expected_context=PatternErrorEnum.pinned_identifier_not_found_in_environment,
)
assert_pattern_match_error(
PinIdentPattern("x"),
IntegerType(),
hijacked_pattern_env={"x": IntegerType()},
env={"y": IntegerType()},
expected_context=PatternErrorEnum.pinned_identifier_not_found_in_environment,
)
assert_pattern_match_error(
PinIdentPattern("x"),
IntegerType(),
env={"x": FunctionType([IntegerType()], IntegerType())},
expected_context=PatternErrorEnum.arrow_types_into_pinned_identifier,
)
def test_tp_wild():
assert_pattern_match_ok(WildPattern(), IntegerType(), expected_type=IntegerType())
assert_pattern_match_ok(
WildPattern(),
IntegerType(),
hijacked_pattern_env={"x": FloatType()},
env={"y": NumberType()},
expected_type=IntegerType(),
)
assert_pattern_match_ok(
WildPattern(),
ListType(FloatType()),
hijacked_pattern_env={"x": FloatType()},
env={"y": NumberType()},
expected_type=ListType(FloatType()),
)
def test_tp_var():
assert_pattern_match_ok(
IdentPattern("x"),
IntegerType(),
expected_type=IntegerType(),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
IdentPattern("x"),
IntegerType(),
env={"x": FloatType()},
expected_type=IntegerType(),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
IdentPattern("x"),
IntegerType(),
hijacked_pattern_env={"y": FloatType()},
expected_type=IntegerType(),
expected_pattern_env={"x": IntegerType(), "y": FloatType()},
)
assert_pattern_match_ok(
IdentPattern("x"),
ListType(FloatType()),
expected_type=ListType(FloatType()),
expected_pattern_env={"x": ListType(FloatType())},
)
def test_tp_varn():
assert_pattern_match_ok(
IdentPattern("x"),
IntegerType(),
hijacked_pattern_env={"x": IntegerType()},
expected_type=IntegerType(),
)
assert_pattern_match_ok(
IdentPattern("x"),
MapType({MapKey(1): TupleType([])}),
hijacked_pattern_env={"x": MapType({MapKey(2): TupleType([])})},
expected_type=MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
expected_pattern_env={"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})},
)
assert_pattern_match_ok(
IdentPattern("x"),
MapType({MapKey(1): TupleType([])}),
hijacked_pattern_env={"x": MapType({MapKey(2): TupleType([])})},
env={"x": MapType({MapKey(3): TupleType([])})},
expected_type=MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
expected_pattern_env={"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})},
)
assert_pattern_match_ok(
IdentPattern("x"),
MapType({MapKey(1): TupleType([])}),
hijacked_pattern_env={
"x": MapType({MapKey(2): TupleType([])}),
"y": MapType({MapKey(3): TupleType([])}),
},
expected_type=MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
expected_pattern_env={
"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
"y": MapType({MapKey(3): TupleType([])}),
},
)
assert_pattern_match_ok(
IdentPattern("x"),
MapType({MapKey(1): TupleType([])}),
hijacked_pattern_env={"x": MapType({MapKey(2): TupleType([])})},
env={"y": MapType({MapKey(3): TupleType([])})},
expected_type=MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
expected_pattern_env={"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})},
)
assert_pattern_match_error(
IdentPattern("x"),
IntegerType(),
hijacked_pattern_env={"x": FloatType()},
expected_context=PatternErrorEnum.incompatible_type_for_variable,
)
assert_pattern_match_error(
IdentPattern("x"),
IntegerType(),
hijacked_pattern_env={"x": FunctionType([IntegerType()], IntegerType())},
expected_context=PatternErrorEnum.arrow_types_into_nonlinear_identifier,
)
assert_pattern_match_error(
IdentPattern("x"),
FunctionType([IntegerType()], IntegerType()),
hijacked_pattern_env={"x": MapType({MapKey(2): TupleType([])})},
expected_context=PatternErrorEnum.arrow_types_into_nonlinear_identifier,
)
def test_tp_elist():
assert_pattern_match_ok(ElistPattern(), ElistType(), expected_type=ElistType())
assert_pattern_match_ok(ElistPattern(), ListType(NumberType()), expected_type=ElistType())
assert_pattern_match_error(
ElistPattern(),
IntegerType(),
expected_context=PatternErrorEnum.incompatible_constructors_error,
)
assert_pattern_match_error(
ElistPattern(),
TupleType([IntegerType()]),
expected_context=PatternErrorEnum.incompatible_constructors_error,
)
def test_tp_list():
assert_pattern_match_ok(
ListPattern(IntegerPattern(1), ElistPattern()),
ListType(IntegerType()),
expected_type=ListType(IntegerType()),
)
assert_pattern_match_ok(
ListPattern(IdentPattern("x"), ElistPattern()),
ListType(IntegerType()),
expected_type=ListType(IntegerType()),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
ListPattern(IntegerPattern(1), ListPattern(FloatPattern(1.0), ElistPattern())),
ListType(NumberType()),
expected_type=ListType(NumberType()),
)
assert_pattern_match_ok(
ListPattern(IdentPattern("x"), ListPattern(IdentPattern("x"), ElistPattern())),
ListType(IntegerType()),
expected_type=ListType(IntegerType()),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
ListPattern(IdentPattern("x"), ListPattern(IdentPattern("x"), ElistPattern())),
ListType(MapType({MapKey(1): TupleType([])})),
hijacked_pattern_env={"x": MapType({MapKey(2): TupleType([])})},
expected_type=ListType(MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})),
expected_pattern_env={"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})},
)
assert_pattern_match_ok(
ListPattern(IdentPattern("x"), ListPattern(IdentPattern("y"), ElistPattern())),
ListType(MapType({MapKey(1): TupleType([])})),
hijacked_pattern_env={"y": MapType({MapKey(2): TupleType([])})},
expected_type=ListType(MapType({MapKey(1): TupleType([])})),
expected_pattern_env={
"x": MapType({MapKey(1): TupleType([])}),
"y": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
},
)
assert_pattern_match_ok(
ListPattern(IdentPattern("x"), ListPattern(IdentPattern("y"), ElistPattern())),
ListType(MapType({MapKey(1): TupleType([])})),
hijacked_pattern_env={"x": MapType({MapKey(2): TupleType([])})},
expected_type=ListType(MapType({MapKey(1): TupleType([])})),
expected_pattern_env={
"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
"y": MapType({MapKey(1): TupleType([])}),
},
)
assert_pattern_match_ok(
ListPattern(
IdentPattern("x"),
ListPattern(
IdentPattern("x"),
ListPattern(IdentPattern("y"), ElistPattern()),
),
),
ListType(MapType({MapKey(1): TupleType([])})),
hijacked_pattern_env={"y": MapType({MapKey(2): TupleType([])})},
expected_type=ListType(MapType({MapKey(1): TupleType([])})),
expected_pattern_env={
"x": MapType({MapKey(1): TupleType([])}),
"y": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
},
)
assert_pattern_match_ok(
ListPattern(
IdentPattern("x"),
ListPattern(
IdentPattern("x"),
ListPattern(IdentPattern("y"), ElistPattern()),
),
),
ListType(MapType({MapKey(1): TupleType([])})),
hijacked_pattern_env={"x": MapType({MapKey(2): TupleType([])})},
expected_type=ListType(MapType({MapKey(1): TupleType([])})),
expected_pattern_env={
"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
"y": MapType({MapKey(1): TupleType([])}),
},
)
assert_pattern_match_error(
ListPattern(IntegerPattern(1), ElistPattern()),
IntegerType(),
expected_context=PatternErrorEnum.incompatible_constructors_error,
)
assert_pattern_match_error(
ListPattern(PinIdentPattern("x"), ElistPattern()),
ListType(IntegerType()),
expected_context=(
ListPatternContext(head=True),
PatternErrorEnum.pinned_identifier_not_found_in_environment,
),
)
assert_pattern_match_error(
ListPattern(IntegerPattern(1), ListPattern(PinIdentPattern("x"), ElistPattern())),
ListType(IntegerType()),
expected_context=(
ListPatternContext(head=False),
(ListPatternContext(head=True), PatternErrorEnum.pinned_identifier_not_found_in_environment),
),
)
def test_tp_tuple():
assert_pattern_match_ok(TuplePattern([]), TupleType([]), expected_type=TupleType([]))
assert_pattern_match_ok(
TuplePattern([IdentPattern("x")]),
TupleType([IntegerType()]),
expected_type=TupleType([IntegerType()]),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
TuplePattern([IdentPattern("x")]),
TupleType([MapType({MapKey(1): TupleType([])})]),
hijacked_pattern_env={"x": MapType({MapKey(2): TupleType([])})},
expected_type=TupleType([MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})]),
expected_pattern_env={"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})},
)
assert_pattern_match_ok(
TuplePattern([IdentPattern("x"), IdentPattern("y")]),
TupleType([IntegerType(), FloatType()]),
expected_type=TupleType([IntegerType(), FloatType()]),
expected_pattern_env={"x": IntegerType(), "y": FloatType()},
)
assert_pattern_match_ok(
TuplePattern([IdentPattern("x"), IdentPattern("x")]),
TupleType([IntegerType(), IntegerType()]),
expected_type=TupleType([IntegerType(), IntegerType()]),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
TuplePattern([IdentPattern("x"), IdentPattern("x")]),
TupleType(
[
MapType({MapKey(1): TupleType([])}),
MapType({MapKey(2): TupleType([])}),
]
),
expected_type=TupleType(
[
MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
]
),
expected_pattern_env={"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})},
)
assert_pattern_match_ok(
TuplePattern([IdentPattern("x"), IdentPattern("y"), IdentPattern("x")]),
TupleType(
[
MapType({MapKey(1): TupleType([])}),
MapType({MapKey(2): TupleType([])}),
MapType({MapKey(3): TupleType([])}),
]
),
expected_type=TupleType(
[
MapType({MapKey(1): TupleType([]), MapKey(3): TupleType([])}),
MapType({MapKey(2): TupleType([])}),
MapType({MapKey(1): TupleType([]), MapKey(3): TupleType([])}),
]
),
expected_pattern_env={
"x": MapType({MapKey(1): TupleType([]), MapKey(3): TupleType([])}),
"y": MapType({MapKey(2): TupleType([])}),
},
)
assert_pattern_match_ok(
TuplePattern([PinIdentPattern("x"), IdentPattern("y"), IdentPattern("x")]),
TupleType(
[
MapType({MapKey(1): TupleType([])}),
MapType({MapKey(2): TupleType([])}),
MapType({MapKey(3): TupleType([])}),
]
),
env={"x": MapType({MapKey(4): TupleType([])})},
expected_type=TupleType(
[
MapType({MapKey(1): TupleType([]), MapKey(4): TupleType([])}),
MapType({MapKey(2): TupleType([])}),
MapType({MapKey(3): TupleType([])}),
]
),
expected_pattern_env={
"x": MapType({MapKey(3): TupleType([])}),
"y": MapType({MapKey(2): TupleType([])}),
},
)
assert_pattern_match_error(
TuplePattern([IdentPattern("x")]),
TupleType([FloatType(), IntegerType()]),
expected_context=PatternErrorEnum.incompatible_tuples_error,
)
assert_pattern_match_error(
TuplePattern([IntegerPattern(1), IdentPattern("x")]),
TupleType([FloatType(), IntegerType()]),
expected_context=(
TuplePatternContext(n=1),
PatternErrorEnum.incompatible_type_for_literal,
),
)
assert_pattern_match_error(
TuplePattern(
[
WildPattern(),
WildPattern(),
TuplePattern(
[
TuplePattern([IntegerPattern(1), IdentPattern("x")]),
IdentPattern("x"),
]
),
]
),
TupleType(
[
FloatType(),
FloatType(),
TupleType([TupleType([FloatType(), IntegerType()]), FloatType()]),
]
),
expected_context=(
TuplePatternContext(n=3),
(
TuplePatternContext(n=1),
(TuplePatternContext(n=1), PatternErrorEnum.incompatible_type_for_literal),
),
),
)
assert_pattern_match_error(
TuplePattern([IdentPattern("x")]),
ListType(FloatType()),
expected_context=PatternErrorEnum.incompatible_constructors_error,
)
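# Map patterns: the pattern may mention any subset of the MapType's keys and key
# order is irrelevant; literal sub-patterns refine the value type (e.g. an integer
# literal against NumberType yields IntegerType), repeated identifiers merge the
# value types they bind, and pinned identifiers are resolved from the outer env.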
def test_tp_map():
assert_pattern_match_ok(
MapPattern(OrderedDict()),
MapType({}),
expected_type=MapType({}),
)
assert_pattern_match_ok(
MapPattern(OrderedDict([(MapKey(1), IdentPattern("x"))])),
MapType({MapKey(1): IntegerType()}),
expected_type=MapType({MapKey(1): IntegerType()}),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
MapPattern(OrderedDict()),
MapType({MapKey(1): IntegerType()}),
expected_type=MapType({MapKey(1): IntegerType()}),
)
assert_pattern_match_ok(
MapPattern(OrderedDict([(MapKey(1), IdentPattern("x")), (MapKey(2), FloatPattern(2.0))])),
MapType({MapKey(1): IntegerType(), MapKey(2): FloatType()}),
expected_type=MapType({MapKey(1): IntegerType(), MapKey(2): FloatType()}),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
MapPattern(OrderedDict([(MapKey(2), FloatPattern(2.0)), (MapKey(1), IdentPattern("x"))])),
MapType({MapKey(1): IntegerType(), MapKey(2): FloatType()}),
expected_type=MapType({MapKey(1): IntegerType(), MapKey(2): FloatType()}),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
MapPattern(OrderedDict([(MapKey(2), FloatPattern(2.0)), (MapKey(1), IdentPattern("x"))])),
MapType({MapKey(2): FloatType(), MapKey(1): IntegerType()}),
expected_type=MapType({MapKey(1): IntegerType(), MapKey(2): FloatType()}),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
MapPattern(OrderedDict([(MapKey(1), IdentPattern("x")), (MapKey(2), FloatPattern(2.0))])),
MapType({MapKey(1): IntegerType(), MapKey(2): FloatType()}),
expected_type=MapType({MapKey(2): FloatType(), MapKey(1): IntegerType()}),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
MapPattern(OrderedDict([(MapKey(1), IdentPattern("x"))])),
MapType({MapKey(1): MapType({MapKey(1): TupleType([])})}),
hijacked_pattern_env={"x": MapType({MapKey(2): TupleType([])})},
expected_type=MapType({MapKey(1): MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})}),
expected_pattern_env={"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})},
)
assert_pattern_match_ok(
MapPattern(OrderedDict([(MapKey(1), IdentPattern("x")), (MapKey(2), IdentPattern("y"))])),
MapType({MapKey(1): IntegerType(), MapKey(2): FloatType()}),
expected_type=MapType({MapKey(1): IntegerType(), MapKey(2): FloatType()}),
expected_pattern_env={"x": IntegerType(), "y": FloatType()},
)
assert_pattern_match_ok(
MapPattern(OrderedDict([(MapKey(1), IdentPattern("x")), (MapKey(2), IdentPattern("x"))])),
MapType({MapKey(1): IntegerType(), MapKey(2): IntegerType()}),
expected_type=MapType({MapKey(1): IntegerType(), MapKey(2): IntegerType()}),
expected_pattern_env={"x": IntegerType()},
)
assert_pattern_match_ok(
MapPattern(OrderedDict([(MapKey(2), IntegerPattern(3))])),
MapType({MapKey(1): IntegerType(), MapKey(2): NumberType()}),
expected_type=MapType({MapKey(1): IntegerType(), MapKey(2): IntegerType()}),
)
assert_pattern_match_ok(
MapPattern(OrderedDict([(MapKey(1), IdentPattern("x")), (MapKey(2), IdentPattern("x"))])),
MapType(
{
MapKey(1): MapType({MapKey(1): TupleType([])}),
MapKey(2): MapType({MapKey(2): TupleType([])}),
}
),
expected_type=MapType(
{
MapKey(1): MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
MapKey(2): MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])}),
}
),
expected_pattern_env={"x": MapType({MapKey(1): TupleType([]), MapKey(2): TupleType([])})},
)
assert_pattern_match_ok(
MapPattern(
OrderedDict(
[
(MapKey(1), IdentPattern("x")),
(MapKey(2), IdentPattern("y")),
(MapKey(3), IdentPattern("x")),
]
)
),
MapType(
{
MapKey(1): MapType({MapKey(1): TupleType([])}),
MapKey(2): MapType({MapKey(2): TupleType([])}),
MapKey(3): MapType({MapKey(3): TupleType([])}),
}
),
expected_type=MapType(
{
MapKey(1): MapType({MapKey(1): TupleType([]), MapKey(3): TupleType([])}),
MapKey(2): MapType({MapKey(2): TupleType([])}),
MapKey(3): MapType({MapKey(1): TupleType([]), MapKey(3): TupleType([])}),
}
),
expected_pattern_env={
"x": MapType({MapKey(1): TupleType([]), MapKey(3): TupleType([])}),
"y": MapType({MapKey(2): TupleType([])}),
},
)
assert_pattern_match_ok(
MapPattern(
OrderedDict(
[
(MapKey(1), PinIdentPattern("x")),
(MapKey(2), IdentPattern("y")),
(MapKey(3), IdentPattern("x")),
]
)
),
MapType(
{
MapKey(1): MapType({MapKey(1): TupleType([])}),
MapKey(2): MapType({MapKey(2): TupleType([])}),
MapKey(3): MapType({MapKey(3): TupleType([])}),
}
),
env={"x": MapType({MapKey(4): TupleType([])})},
expected_type=MapType(
{
MapKey(1): MapType({MapKey(1): TupleType([]), MapKey(4): TupleType([])}),
MapKey(2): MapType({MapKey(2): TupleType([])}),
MapKey(3): MapType({MapKey(3): TupleType([])}),
}
),
expected_pattern_env={
"x": MapType({MapKey(3): TupleType([])}),
"y": MapType({MapKey(2): TupleType([])}),
},
)
assert_pattern_match_error(
MapPattern(OrderedDict([(MapKey(1), IdentPattern("x"))])),
MapType({MapKey(2): FloatType(), MapKey(3): IntegerType()}),
expected_context=PatternErrorEnum.incompatible_maps_error,
)
assert_pattern_match_error(
MapPattern(OrderedDict([(MapKey(1), IntegerPattern(1)), (MapKey(2), IdentPattern("x"))])),
MapType({MapKey(1): FloatType(), MapKey(2): IntegerType()}),
expected_context=(
MapPatternContext(key=MapKey(1)),
PatternErrorEnum.incompatible_type_for_literal,
),
)
assert_pattern_match_error(
MapPattern(OrderedDict([(MapKey(2), IdentPattern("x")), (MapKey(1), IntegerPattern(1))])),
MapType({MapKey(1): FloatType(), MapKey(2): IntegerType()}),
expected_context=(
MapPatternContext(key=MapKey(1)),
PatternErrorEnum.incompatible_type_for_literal,
),
)
assert_pattern_match_error(
MapPattern(OrderedDict([(MapKey(1), IntegerPattern(1)), (MapKey(2), IdentPattern("x"))])),
MapType({MapKey(2): IntegerType(), MapKey(1): FloatType()}),
expected_context=(
MapPatternContext(key=MapKey(1)),
PatternErrorEnum.incompatible_type_for_literal,
),
)
assert_pattern_match_error(
MapPattern(OrderedDict([(MapKey(2), IdentPattern("x")), (MapKey(1), IntegerPattern(1))])),
MapType({MapKey(1): FloatType(), MapKey(2): IntegerType()}),
expected_context=(
MapPatternContext(key=MapKey(1)),
PatternErrorEnum.incompatible_type_for_literal,
),
)
assert_pattern_match_error(
MapPattern(
OrderedDict(
[
(MapKey(1), WildPattern()),
(MapKey(2), WildPattern()),
(
MapKey(3),
MapPattern(
OrderedDict(
[
(
MapKey(1),
MapPattern(
OrderedDict(
[
(MapKey(1), IntegerPattern(1)),
(MapKey(2), IdentPattern("x")),
]
)
),
),
(MapKey(2), IdentPattern("x")),
]
)
),
),
]
)
),
MapType(
{
MapKey(1): FloatType(),
MapKey(2): FloatType(),
MapKey(3): MapType(
{
MapKey(1): MapType({MapKey(1): FloatType(), MapKey(2): IntegerType()}),
MapKey(2): FloatType(),
}
),
}
),
expected_context=(
MapPatternContext(key=MapKey(3)),
(
MapPatternContext(key=MapKey(1)),
(
MapPatternContext(key=MapKey(1)),
PatternErrorEnum.incompatible_type_for_literal,
),
),
),
)
assert_pattern_match_error(
MapPattern(
OrderedDict(
[
(MapKey(2), WildPattern()),
(MapKey(3), WildPattern()),
(
MapKey(1),
MapPattern(
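# Pyomo model fragment (apparently auto-generated): each constraint bounds x_i**2
# by the product of an auxiliary variable x_{i+650} and a block binary (b626,
# b627, ...), so x_i is forced to 0 whenever its block's binary is 0; the binary
# changes every 25 variables.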
m.c654 = Constraint(expr=m.x3*m.x3 - m.x653*m.b626 <= 0)
m.c655 = Constraint(expr=m.x4*m.x4 - m.x654*m.b626 <= 0)
m.c656 = Constraint(expr=m.x5*m.x5 - m.x655*m.b626 <= 0)
m.c657 = Constraint(expr=m.x6*m.x6 - m.x656*m.b626 <= 0)
m.c658 = Constraint(expr=m.x7*m.x7 - m.x657*m.b626 <= 0)
m.c659 = Constraint(expr=m.x8*m.x8 - m.x658*m.b626 <= 0)
m.c660 = Constraint(expr=m.x9*m.x9 - m.x659*m.b626 <= 0)
m.c661 = Constraint(expr=m.x10*m.x10 - m.x660*m.b626 <= 0)
m.c662 = Constraint(expr=m.x11*m.x11 - m.x661*m.b626 <= 0)
m.c663 = Constraint(expr=m.x12*m.x12 - m.x662*m.b626 <= 0)
m.c664 = Constraint(expr=m.x13*m.x13 - m.x663*m.b626 <= 0)
m.c665 = Constraint(expr=m.x14*m.x14 - m.x664*m.b626 <= 0)
m.c666 = Constraint(expr=m.x15*m.x15 - m.x665*m.b626 <= 0)
m.c667 = Constraint(expr=m.x16*m.x16 - m.x666*m.b626 <= 0)
m.c668 = Constraint(expr=m.x17*m.x17 - m.x667*m.b626 <= 0)
m.c669 = Constraint(expr=m.x18*m.x18 - m.x668*m.b626 <= 0)
m.c670 = Constraint(expr=m.x19*m.x19 - m.x669*m.b626 <= 0)
m.c671 = Constraint(expr=m.x20*m.x20 - m.x670*m.b626 <= 0)
m.c672 = Constraint(expr=m.x21*m.x21 - m.x671*m.b626 <= 0)
m.c673 = Constraint(expr=m.x22*m.x22 - m.x672*m.b626 <= 0)
m.c674 = Constraint(expr=m.x23*m.x23 - m.x673*m.b626 <= 0)
m.c675 = Constraint(expr=m.x24*m.x24 - m.x674*m.b626 <= 0)
m.c676 = Constraint(expr=m.x25*m.x25 - m.x675*m.b626 <= 0)
m.c677 = Constraint(expr=m.x26*m.x26 - m.x676*m.b627 <= 0)
m.c678 = Constraint(expr=m.x27*m.x27 - m.x677*m.b627 <= 0)
m.c679 = Constraint(expr=m.x28*m.x28 - m.x678*m.b627 <= 0)
m.c680 = Constraint(expr=m.x29*m.x29 - m.x679*m.b627 <= 0)
m.c681 = Constraint(expr=m.x30*m.x30 - m.x680*m.b627 <= 0)
m.c682 = Constraint(expr=m.x31*m.x31 - m.x681*m.b627 <= 0)
m.c683 = Constraint(expr=m.x32*m.x32 - m.x682*m.b627 <= 0)
m.c684 = Constraint(expr=m.x33*m.x33 - m.x683*m.b627 <= 0)
m.c685 = Constraint(expr=m.x34*m.x34 - m.x684*m.b627 <= 0)
m.c686 = Constraint(expr=m.x35*m.x35 - m.x685*m.b627 <= 0)
m.c687 = Constraint(expr=m.x36*m.x36 - m.x686*m.b627 <= 0)
m.c688 = Constraint(expr=m.x37*m.x37 - m.x687*m.b627 <= 0)
m.c689 = Constraint(expr=m.x38*m.x38 - m.x688*m.b627 <= 0)
m.c690 = Constraint(expr=m.x39*m.x39 - m.x689*m.b627 <= 0)
m.c691 = Constraint(expr=m.x40*m.x40 - m.x690*m.b627 <= 0)
m.c692 = Constraint(expr=m.x41*m.x41 - m.x691*m.b627 <= 0)
m.c693 = Constraint(expr=m.x42*m.x42 - m.x692*m.b627 <= 0)
m.c694 = Constraint(expr=m.x43*m.x43 - m.x693*m.b627 <= 0)
m.c695 = Constraint(expr=m.x44*m.x44 - m.x694*m.b627 <= 0)
m.c696 = Constraint(expr=m.x45*m.x45 - m.x695*m.b627 <= 0)
m.c697 = Constraint(expr=m.x46*m.x46 - m.x696*m.b627 <= 0)
m.c698 = Constraint(expr=m.x47*m.x47 - m.x697*m.b627 <= 0)
m.c699 = Constraint(expr=m.x48*m.x48 - m.x698*m.b627 <= 0)
m.c700 = Constraint(expr=m.x49*m.x49 - m.x699*m.b627 <= 0)
m.c701 = Constraint(expr=m.x50*m.x50 - m.x700*m.b627 <= 0)
m.c702 = Constraint(expr=m.x51*m.x51 - m.x701*m.b628 <= 0)
m.c703 = Constraint(expr=m.x52*m.x52 - m.x702*m.b628 <= 0)
m.c704 = Constraint(expr=m.x53*m.x53 - m.x703*m.b628 <= 0)
m.c705 = Constraint(expr=m.x54*m.x54 - m.x704*m.b628 <= 0)
m.c706 = Constraint(expr=m.x55*m.x55 - m.x705*m.b628 <= 0)
m.c707 = Constraint(expr=m.x56*m.x56 - m.x706*m.b628 <= 0)
m.c708 = Constraint(expr=m.x57*m.x57 - m.x707*m.b628 <= 0)
m.c709 = Constraint(expr=m.x58*m.x58 - m.x708*m.b628 <= 0)
m.c710 = Constraint(expr=m.x59*m.x59 - m.x709*m.b628 <= 0)
m.c711 = Constraint(expr=m.x60*m.x60 - m.x710*m.b628 <= 0)
m.c712 = Constraint(expr=m.x61*m.x61 - m.x711*m.b628 <= 0)
m.c713 = Constraint(expr=m.x62*m.x62 - m.x712*m.b628 <= 0)
m.c714 = Constraint(expr=m.x63*m.x63 - m.x713*m.b628 <= 0)
m.c715 = Constraint(expr=m.x64*m.x64 - m.x714*m.b628 <= 0)
m.c716 = Constraint(expr=m.x65*m.x65 - m.x715*m.b628 <= 0)
m.c717 = Constraint(expr=m.x66*m.x66 - m.x716*m.b628 <= 0)
m.c718 = Constraint(expr=m.x67*m.x67 - m.x717*m.b628 <= 0)
m.c719 = Constraint(expr=m.x68*m.x68 - m.x718*m.b628 <= 0)
m.c720 = Constraint(expr=m.x69*m.x69 - m.x719*m.b628 <= 0)
m.c721 = Constraint(expr=m.x70*m.x70 - m.x720*m.b628 <= 0)
m.c722 = Constraint(expr=m.x71*m.x71 - m.x721*m.b628 <= 0)
m.c723 = Constraint(expr=m.x72*m.x72 - m.x722*m.b628 <= 0)
m.c724 = Constraint(expr=m.x73*m.x73 - m.x723*m.b628 <= 0)
m.c725 = Constraint(expr=m.x74*m.x74 - m.x724*m.b628 <= 0)
m.c726 = Constraint(expr=m.x75*m.x75 - m.x725*m.b628 <= 0)
m.c727 = Constraint(expr=m.x76*m.x76 - m.x726*m.b629 <= 0)
m.c728 = Constraint(expr=m.x77*m.x77 - m.x727*m.b629 <= 0)
m.c729 = Constraint(expr=m.x78*m.x78 - m.x728*m.b629 <= 0)
m.c730 = Constraint(expr=m.x79*m.x79 - m.x729*m.b629 <= 0)
m.c731 = Constraint(expr=m.x80*m.x80 - m.x730*m.b629 <= 0)
m.c732 = Constraint(expr=m.x81*m.x81 - m.x731*m.b629 <= 0)
m.c733 = Constraint(expr=m.x82*m.x82 - m.x732*m.b629 <= 0)
m.c734 = Constraint(expr=m.x83*m.x83 - m.x733*m.b629 <= 0)
m.c735 = Constraint(expr=m.x84*m.x84 - m.x734*m.b629 <= 0)
m.c736 = Constraint(expr=m.x85*m.x85 - m.x735*m.b629 <= 0)
m.c737 = Constraint(expr=m.x86*m.x86 - m.x736*m.b629 <= 0)
m.c738 = Constraint(expr=m.x87*m.x87 - m.x737*m.b629 <= 0)
m.c739 = Constraint(expr=m.x88*m.x88 - m.x738*m.b629 <= 0)
m.c740 = Constraint(expr=m.x89*m.x89 - m.x739*m.b629 <= 0)
m.c741 = Constraint(expr=m.x90*m.x90 - m.x740*m.b629 <= 0)
m.c742 = Constraint(expr=m.x91*m.x91 - m.x741*m.b629 <= 0)
m.c743 = Constraint(expr=m.x92*m.x92 - m.x742*m.b629 <= 0)
m.c744 = Constraint(expr=m.x93*m.x93 - m.x743*m.b629 <= 0)
m.c745 = Constraint(expr=m.x94*m.x94 - m.x744*m.b629 <= 0)
m.c746 = Constraint(expr=m.x95*m.x95 - m.x745*m.b629 <= 0)
m.c747 = Constraint(expr=m.x96*m.x96 - m.x746*m.b629 <= 0)
m.c748 = Constraint(expr=m.x97*m.x97 - m.x747*m.b629 <= 0)
m.c749 = Constraint(expr=m.x98*m.x98 - m.x748*m.b629 <= 0)
m.c750 = Constraint(expr=m.x99*m.x99 - m.x749*m.b629 <= 0)
m.c751 = Constraint(expr=m.x100*m.x100 - m.x750*m.b629 <= 0)
m.c752 = Constraint(expr=m.x101*m.x101 - m.x751*m.b630 <= 0)
m.c753 = Constraint(expr=m.x102*m.x102 - m.x752*m.b630 <= 0)
m.c754 = Constraint(expr=m.x103*m.x103 - m.x753*m.b630 <= 0)
m.c755 = Constraint(expr=m.x104*m.x104 - m.x754*m.b630 <= 0)
m.c756 = Constraint(expr=m.x105*m.x105 - m.x755*m.b630 <= 0)
m.c757 = Constraint(expr=m.x106*m.x106 - m.x756*m.b630 <= 0)
m.c758 = Constraint(expr=m.x107*m.x107 - m.x757*m.b630 <= 0)
m.c759 = Constraint(expr=m.x108*m.x108 - m.x758*m.b630 <= 0)
m.c760 = Constraint(expr=m.x109*m.x109 - m.x759*m.b630 <= 0)
m.c761 = Constraint(expr=m.x110*m.x110 - m.x760*m.b630 <= 0)
m.c762 = Constraint(expr=m.x111*m.x111 - m.x761*m.b630 <= 0)
m.c763 = Constraint(expr=m.x112*m.x112 - m.x762*m.b630 <= 0)
m.c764 = Constraint(expr=m.x113*m.x113 - m.x763*m.b630 <= 0)
m.c765 = Constraint(expr=m.x114*m.x114 - m.x764*m.b630 <= 0)
m.c766 = Constraint(expr=m.x115*m.x115 - m.x765*m.b630 <= 0)
m.c767 = Constraint(expr=m.x116*m.x116 - m.x766*m.b630 <= 0)
m.c768 = Constraint(expr=m.x117*m.x117 - m.x767*m.b630 <= 0)
m.c769 = Constraint(expr=m.x118*m.x118 - m.x768*m.b630 <= 0)
m.c770 = Constraint(expr=m.x119*m.x119 - m.x769*m.b630 <= 0)
m.c771 = Constraint(expr=m.x120*m.x120 - m.x770*m.b630 <= 0)
m.c772 = Constraint(expr=m.x121*m.x121 - m.x771*m.b630 <= 0)
m.c773 = Constraint(expr=m.x122*m.x122 - m.x772*m.b630 <= 0)
m.c774 = Constraint(expr=m.x123*m.x123 - m.x773*m.b630 <= 0)
m.c775 = Constraint(expr=m.x124*m.x124 - m.x774*m.b630 <= 0)
m.c776 = Constraint(expr=m.x125*m.x125 - m.x775*m.b630 <= 0)
m.c777 = Constraint(expr=m.x126*m.x126 - m.x776*m.b631 <= 0)
m.c778 = Constraint(expr=m.x127*m.x127 - m.x777*m.b631 <= 0)
m.c779 = Constraint(expr=m.x128*m.x128 - m.x778*m.b631 <= 0)
m.c780 = Constraint(expr=m.x129*m.x129 - m.x779*m.b631 <= 0)
m.c781 = Constraint(expr=m.x130*m.x130 - m.x780*m.b631 <= 0)
m.c782 = Constraint(expr=m.x131*m.x131 - m.x781*m.b631 <= 0)
m.c783 = Constraint(expr=m.x132*m.x132 - m.x782*m.b631 <= 0)
m.c784 = Constraint(expr=m.x133*m.x133 - m.x783*m.b631 <= 0)
m.c785 = Constraint(expr=m.x134*m.x134 - m.x784*m.b631 <= 0)
m.c786 = Constraint(expr=m.x135*m.x135 - m.x785*m.b631 <= 0)
m.c787 = Constraint(expr=m.x136*m.x136 - m.x786*m.b631 <= 0)
m.c788 = Constraint(expr=m.x137*m.x137 - m.x787*m.b631 <= 0)
m.c789 = Constraint(expr=m.x138*m.x138 - m.x788*m.b631 <= 0)
m.c790 = Constraint(expr=m.x139*m.x139 - m.x789*m.b631 <= 0)
m.c791 = Constraint(expr=m.x140*m.x140 - m.x790*m.b631 <= 0)
m.c792 = Constraint(expr=m.x141*m.x141 - m.x791*m.b631 <= 0)
m.c793 = Constraint(expr=m.x142*m.x142 - m.x792*m.b631 <= 0)
m.c794 = Constraint(expr=m.x143*m.x143 - m.x793*m.b631 <= 0)
m.c795 = Constraint(expr=m.x144*m.x144 - m.x794*m.b631 <= 0)
m.c796 = Constraint(expr=m.x145*m.x145 - m.x795*m.b631 <= 0)
m.c797 = Constraint(expr=m.x146*m.x146 - m.x796*m.b631 <= 0)
m.c798 = Constraint(expr=m.x147*m.x147 - m.x797*m.b631 <= 0)
m.c799 = Constraint(expr=m.x148*m.x148 - m.x798*m.b631 <= 0)
m.c800 = Constraint(expr=m.x149*m.x149 - m.x799*m.b631 <= 0)
m.c801 = Constraint(expr=m.x150*m.x150 - m.x800*m.b631 <= 0)
m.c802 = Constraint(expr=m.x151*m.x151 - m.x801*m.b632 <= 0)
m.c803 = Constraint(expr=m.x152*m.x152 - m.x802*m.b632 <= 0)
m.c804 = Constraint(expr=m.x153*m.x153 - m.x803*m.b632 <= 0)
m.c805 = Constraint(expr=m.x154*m.x154 - m.x804*m.b632 <= 0)
m.c806 = Constraint(expr=m.x155*m.x155 - m.x805*m.b632 <= 0)
m.c807 = Constraint(expr=m.x156*m.x156 - m.x806*m.b632 <= 0)
m.c808 = Constraint(expr=m.x157*m.x157 - m.x807*m.b632 <= 0)
m.c809 = Constraint(expr=m.x158*m.x158 - m.x808*m.b632 <= 0)
m.c810 = Constraint(expr=m.x159*m.x159 - m.x809*m.b632 <= 0)
m.c811 = Constraint(expr=m.x160*m.x160 - m.x810*m.b632 <= 0)
m.c812 = Constraint(expr=m.x161*m.x161 - m.x811*m.b632 <= 0)
m.c813 = Constraint(expr=m.x162*m.x162 - m.x812*m.b632 <= 0)
m.c814 = Constraint(expr=m.x163*m.x163 - m.x813*m.b632 <= 0)
m.c815 = Constraint(expr=m.x164*m.x164 - m.x814*m.b632 <= 0)
m.c816 = Constraint(expr=m.x165*m.x165 - m.x815*m.b632 <= 0)
m.c817 = Constraint(expr=m.x166*m.x166 - m.x816*m.b632 <= 0)
m.c818 = Constraint(expr=m.x167*m.x167 - m.x817*m.b632 <= 0)
m.c819 = Constraint(expr=m.x168*m.x168 - m.x818*m.b632 <= 0)
m.c820 = Constraint(expr=m.x169*m.x169 - m.x819*m.b632 <= 0)
m.c821 = Constraint(expr=m.x170*m.x170 - m.x820*m.b632 <= 0)
m.c822 = Constraint(expr=m.x171*m.x171 - m.x821*m.b632 <= 0)
m.c823 = Constraint(expr=m.x172*m.x172 - m.x822*m.b632 <= 0)
m.c824 = Constraint(expr=m.x173*m.x173 - m.x823*m.b632 <= 0)
m.c825 = Constraint(expr=m.x174*m.x174 - m.x824*m.b632 <= 0)
m.c826 = Constraint(expr=m.x175*m.x175 - m.x825*m.b632 <= 0)
m.c827 = Constraint(expr=m.x176*m.x176 - m.x826*m.b633 <= 0)
m.c828 = Constraint(expr=m.x177*m.x177 - m.x827*m.b633 <= 0)
m.c829 = Constraint(expr=m.x178*m.x178 - m.x828*m.b633 <= 0)
m.c830 = Constraint(expr=m.x179*m.x179 - m.x829*m.b633 <= 0)
m.c831 = Constraint(expr=m.x180*m.x180 - m.x830*m.b633 <= 0)
m.c832 = Constraint(expr=m.x181*m.x181 - m.x831*m.b633 <= 0)
m.c833 = Constraint(expr=m.x182*m.x182 - m.x832*m.b633 <= 0)
m.c834 = Constraint(expr=m.x183*m.x183 - m.x833*m.b633 <= 0)
m.c835 = Constraint(expr=m.x184*m.x184 - m.x834*m.b633 <= 0)
m.c836 = Constraint(expr=m.x185*m.x185 - m.x835*m.b633 <= 0)
m.c837 = Constraint(expr=m.x186*m.x186 - m.x836*m.b633 <= 0)
m.c838 = Constraint(expr=m.x187*m.x187 - m.x837*m.b633 <= 0)
m.c839 = Constraint(expr=m.x188*m.x188 - m.x838*m.b633 <= 0)
m.c840 = Constraint(expr=m.x189*m.x189 - m.x839*m.b633 <= 0)
m.c841 = Constraint(expr=m.x190*m.x190 - m.x840*m.b633 <= 0)
m.c842 = Constraint(expr=m.x191*m.x191 - m.x841*m.b633 <= 0)
m.c843 = Constraint(expr=m.x192*m.x192 - m.x842*m.b633 <= 0)
m.c844 = Constraint(expr=m.x193*m.x193 - m.x843*m.b633 <= 0)
m.c845 = Constraint(expr=m.x194*m.x194 - m.x844*m.b633 <= 0)
m.c846 = Constraint(expr=m.x195*m.x195 - m.x845*m.b633 <= 0)
m.c847 = Constraint(expr=m.x196*m.x196 - m.x846*m.b633 <= 0)
m.c848 = Constraint(expr=m.x197*m.x197 - m.x847*m.b633 <= 0)
m.c849 = Constraint(expr=m.x198*m.x198 - m.x848*m.b633 <= 0)
m.c850 = Constraint(expr=m.x199*m.x199 - m.x849*m.b633 <= 0)
m.c851 = Constraint(expr=m.x200*m.x200 - m.x850*m.b633 <= 0)
m.c852 = Constraint(expr=m.x201*m.x201 - m.x851*m.b634 <= 0)
m.c853 = Constraint(expr=m.x202*m.x202 - m.x852*m.b634 <= 0)
m.c854 = Constraint(expr=m.x203*m.x203 - m.x853*m.b634 <= 0)
m.c855 = Constraint(expr=m.x204*m.x204 - m.x854*m.b634 <= 0)
m.c856 = Constraint(expr=m.x205*m.x205 - m.x855*m.b634 <= 0)
m.c857 = Constraint(expr=m.x206*m.x206 - m.x856*m.b634 <= 0)
m.c858 = Constraint(expr=m.x207*m.x207 - m.x857*m.b634 <= 0)
m.c859 = Constraint(expr=m.x208*m.x208 - m.x858*m.b634 <= 0)
m.c860 = Constraint(expr=m.x209*m.x209 - m.x859*m.b634 <= 0)
m.c861 = Constraint(expr=m.x210*m.x210 - m.x860*m.b634 <= 0)
m.c862 = Constraint(expr=m.x211*m.x211 - m.x861*m.b634 <= 0)
m.c863 = Constraint(expr=m.x212*m.x212 - m.x862*m.b634 <= 0)
m.c864 = Constraint(expr=m.x213*m.x213 - m.x863*m.b634 <= 0)
m.c865 = Constraint(expr=m.x214*m.x214 - m.x864*m.b634 <= 0)
m.c866 = Constraint(expr=m.x215*m.x215 - m.x865*m.b634 <= 0)
m.c867 = Constraint(expr=m.x216*m.x216 - m.x866*m.b634 <= 0)
self.recv_buf = b''
if self.decrypt_packet_num == 0:
logging.info('auth_sha1_v4: over size')
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data error')
if length > len(self.recv_buf):
break
if struct.pack('<I', zlib.adler32(self.recv_buf[:length - 4]) & 0xFFFFFFFF) != self.recv_buf[length - 4:length]:
logging.info('auth_sha1_v4: checksum error, data %s' % (binascii.hexlify(self.recv_buf[:length]),))
self.raw_trans = True
self.recv_buf = b''
if self.decrypt_packet_num == 0:
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data incorrect checksum')
pos = common.ord(self.recv_buf[4])
if pos < 255:
pos += 4
else:
pos = struct.unpack('>H', self.recv_buf[5:7])[0] + 4
out_buf += self.recv_buf[pos:length - 4]
self.recv_buf = self.recv_buf[length:]
if pos == length - 4:
sendback = True
if out_buf:
self.server_info.data.update(self.client_id, self.connection_id)
self.decrypt_packet_num += 1
return (out_buf, sendback)
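# auth_aes128: ShadowsocksR protocol plugin. The first packet carries an
# AES-128-CBC encrypted auth header plus a 10-byte HMAC-SHA1 tag; every later
# packet is framed with a CRC16-checked length field, random padding and an
# adler32 trailer xor'd with an incrementing pack_id / recv_id counter.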
class auth_aes128(auth_base):
def __init__(self, method):
super(auth_aes128, self).__init__(method)
self.recv_buf = b''
self.unit_len = 8100
self.raw_trans = False
self.has_sent_header = False
self.has_recv_header = False
self.client_id = 0
self.connection_id = 0
self.max_time_dif = 60 * 60 * 24  # max allowed clock difference between client and server, in seconds
self.salt = b"<PASSWORD>"
self.no_compatible_method = 'auth_aes128'
self.extra_wait_size = struct.unpack('>H', os.urandom(2))[0] % 1024
self.pack_id = 0
self.recv_id = 0
def init_data(self):
return obfs_auth_v2_data()
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param)
except:
max_client = 64
self.server_info.data.set_max_client(max_client)
def rnd_data(self, buf_size):
if buf_size > 1200:
return b'\x01'
if self.pack_id > 4:
rnd_data = os.urandom(common.ord(os.urandom(1)[0]) % 32)
elif buf_size > 900:
rnd_data = os.urandom(common.ord(os.urandom(1)[0]) % 128)
else:
rnd_data = os.urandom(struct.unpack('>H', os.urandom(2))[0] % 512)
if len(rnd_data) < 128:
return common.chr(len(rnd_data) + 1) + rnd_data
else:
return common.chr(255) + struct.pack('<H', len(rnd_data) + 3) + rnd_data
def pack_data(self, buf):
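# Data packet layout: <length (2, LE)> <crc16 of the length (2)> <random padding>
# <payload> <adler32 of the packet XOR pack_id (4)>.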
data = self.rnd_data(len(buf)) + buf
data_len = len(data) + 8
crc = binascii.crc32(struct.pack('<H', data_len)) & 0xFFFF
data = struct.pack('<H', crc) + data
data = struct.pack('<H', data_len) + data
adler32 = (zlib.adler32(data) & 0xFFFFFFFF) ^ self.pack_id
self.pack_id = (self.pack_id + 1) & 0xFFFFFFFF
data += struct.pack('<I', adler32)
return data
def pack_auth_data(self, auth_data, buf):
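# Handshake packet: 4 random uid bytes, the AES-128-CBC encrypted auth blob and
# length fields (key = base64(uid + server key) + salt), a 10-byte HMAC-SHA1 tag
# keyed with iv + key, random padding, the payload, and a trailing adler32.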
if len(buf) == 0:
return b''
if len(buf) > 400:
rnd_len = common.ord(os.urandom(1)[0]) % 512
else:
rnd_len = struct.unpack('<H', os.urandom(2))[0] % 1024
data = auth_data
data_len = 4 + 16 + 10 + len(buf) + rnd_len + 4
data = data + struct.pack('<H', data_len) + struct.pack('<H', rnd_len)
uid = os.urandom(4)
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(uid + self.server_info.key)) + self.salt, 'aes-128-cbc', b'\x00' * 16)
data = uid + encryptor.encrypt(data)[16:]
data += hmac.new(self.server_info.iv + self.server_info.key, data, hashlib.sha1).digest()[:10]
data += os.urandom(rnd_len) + buf
data += struct.pack('<I', (zlib.adler32(data) & 0xFFFFFFFF))
return data
def auth_data(self):
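# 12-byte auth blob: UTC time (4) + local client id (4) + connection id (4);
# the client id is regenerated once connection_id grows past 0xFF000000.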
utc_time = int(time.time()) & 0xFFFFFFFF
if self.server_info.data.connection_id > 0xFF000000:
self.server_info.data.local_client_id = b''
if not self.server_info.data.local_client_id:
self.server_info.data.local_client_id = os.urandom(4)
logging.debug("local_client_id %s" % (binascii.hexlify(self.server_info.data.local_client_id),))
self.server_info.data.connection_id = struct.unpack('<I', os.urandom(4))[0] & 0xFFFFFF
self.server_info.data.connection_id += 1
return b''.join([struct.pack('<I', utc_time),
self.server_info.data.local_client_id,
struct.pack('<I', self.server_info.data.connection_id)])
def client_pre_encrypt(self, buf):
ret = b''
if not self.has_sent_header:
head_size = self.get_head_size(buf, 30)
datalen = min(len(buf), random.randint(0, 31) + head_size)
ret += self.pack_auth_data(self.auth_data(), buf[:datalen])
buf = buf[datalen:]
self.has_sent_header = True
while len(buf) > self.unit_len:
ret += self.pack_data(buf[:self.unit_len])
buf = buf[self.unit_len:]
ret += self.pack_data(buf)
return ret
def client_post_decrypt(self, buf):
if self.raw_trans:
return buf
self.recv_buf += buf
out_buf = b''
while len(self.recv_buf) > 4:
crc = struct.pack('<H', binascii.crc32(self.recv_buf[:2]) & 0xFFFF)
if crc != self.recv_buf[2:4]:
raise Exception('client_post_decrypt data incorrect crc')
length = struct.unpack('<H', self.recv_buf[:2])[0]
if length >= 8192 or length < 7:
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data error')
if length > len(self.recv_buf):
break
if struct.pack('<I', (zlib.adler32(self.recv_buf[:length - 4]) & 0xFFFFFFFF) ^ self.recv_id) != self.recv_buf[length - 4:length]:
self.raw_trans = True
self.recv_buf = b''
raise Exception('client_post_decrypt data incorrect checksum')
self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
pos = common.ord(self.recv_buf[4])
if pos < 255:
pos += 4
else:
pos = struct.unpack('<H', self.recv_buf[5:7])[0] + 4
out_buf += self.recv_buf[pos:length - 4]
self.recv_buf = self.recv_buf[length:]
return out_buf
def server_pre_encrypt(self, buf):
if self.raw_trans:
return buf
ret = b''
while len(buf) > self.unit_len:
ret += self.pack_data(buf[:self.unit_len])
buf = buf[self.unit_len:]
ret += self.pack_data(buf)
return ret
def server_post_decrypt(self, buf):
if self.raw_trans:
return (buf, False)
self.recv_buf += buf
out_buf = b''
sendback = False
if not self.has_recv_header:
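# Handshake parsing: bytes 0..20 are uid + encrypted header, bytes 20..30 are
# the HMAC-SHA1 tag computed over them with recv_iv + key.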
if len(self.recv_buf) < 30:
return (b'', False)
sha1data = hmac.new(self.server_info.recv_iv + self.server_info.key, self.recv_buf[:20], hashlib.sha1).digest()[:10]
if sha1data != self.recv_buf[20:30]:
logging.error('auth_aes128 data incorrect auth HMAC-SHA1 from %s:%d, data %s' % (self.server_info.client, self.server_info.client_port, binascii.hexlify(self.recv_buf)))
if len(self.recv_buf) < 30 + self.extra_wait_size:
return (b'', False)
return self.not_match_return(self.recv_buf)
user_key = self.recv_buf[:4]
encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(user_key + self.server_info.key)) + self.salt, 'aes-128-cbc')
head = encryptor.decrypt(b'\x00' * 16 + self.recv_buf[4:20] + b'\x00')  # append one extra byte, otherwise decrypt() returns an empty result
length = struct.unpack('<H', head[12:14])[0]
if len(self.recv_buf) < length:
return (b'', False)
utc_time = struct.unpack('<I', head[:4])[0]
client_id = struct.unpack('<I', head[4:8])[0]
connection_id = struct.unpack('<I', head[8:12])[0]
rnd_len = struct.unpack('<H', head[14:16])[0]
if struct.pack('<I', zlib.adler32(self.recv_buf[:length - 4]) & 0xFFFFFFFF) != self.recv_buf[length - 4:length]:
logging.info('auth_aes128: checksum error, data %s' % (binascii.hexlify(self.recv_buf[:length]),))
return self.not_match_return(self.recv_buf)
time_dif = common.int32(utc_time - (int(time.time()) & 0xffffffff))
if time_dif < -self.max_time_dif or time_dif > self.max_time_dif:
logging.info('auth_aes128: wrong timestamp, time_dif %d, data %s' % (time_dif, binascii.hexlify(head),))
return self.not_match_return(self.recv_buf)
elif self.server_info.data.insert(client_id, connection_id):
self.has_recv_header = True
out_buf = self.recv_buf[30 + rnd_len:length - 4]
self.client_id = client_id
self.connection_id = connection_id
else:
logging.info('auth_aes128: auth fail, data %s' % (binascii.hexlify(out_buf),))
return self.not_match_return(self.recv_buf)
self.recv_buf = self.recv_buf[length:]
self.has_recv_header = True
sendback = True
while len(self.recv_buf) > 4:
crc = struct.pack('<H', binascii.crc32(self.recv_buf[:2]) & 0xFFFF)
if crc != self.recv_buf[2:4]:
self.raw_trans = True
logging.info('auth_aes128: wrong crc')
if self.recv_id == 0:
logging.info('auth_aes128: wrong crc')
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data error')
length = struct.unpack('<H', self.recv_buf[:2])[0]
if length >= 8192 or length < 7:
self.raw_trans = True
self.recv_buf = b''
if self.recv_id == 0:
logging.info('auth_aes128: over size')
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data error')
if length > len(self.recv_buf):
break
if struct.pack('<I', (zlib.adler32(self.recv_buf[:length - 4]) & 0xFFFFFFFF) ^ self.recv_id) != self.recv_buf[length - 4:length]:
logging.info('auth_aes128: checksum error, data %s' % (binascii.hexlify(self.recv_buf[:length]),))
self.raw_trans = True
self.recv_buf = b''
if self.recv_id == 0:
return (b'E'*2048, False)
else:
raise Exception('server_post_decrypt data incorrect checksum')
self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
pos = common.ord(self.recv_buf[4])
if pos < 255:
pos += 4
else:
pos = struct.unpack('<H', self.recv_buf[5:7])[0] + 4
out_buf += self.recv_buf[pos:length - 4]
self.recv_buf = self.recv_buf[length:]
if pos == length - 4:
sendback = True
if out_buf:
self.server_info.data.update(self.client_id, self.connection_id)
return (out_buf, sendback)
def client_udp_pre_encrypt(self, buf):
return buf + struct.pack('<I', zlib.adler32(buf) & 0xFFFFFFFF)
def client_udp_post_decrypt(self, buf):
length = len(buf)
data = buf[:-4]
if struct.pack('<I', zlib.adler32(data) & 0xFFFFFFFF) != buf[length - 4:]:
return b''
return data
def server_udp_pre_encrypt(self, buf):
return buf + struct.pack('<I', zlib.adler32(buf) & 0xFFFFFFFF)
def server_udp_post_decrypt(self, buf):
length = len(buf)
data = buf[:-4]
if struct.pack('<I', zlib.adler32(data) & 0xFFFFFFFF) != buf[length - 4:]:
return (b'', None)
return (data, None)
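# Per-user connection bookkeeping for the multi-user protocols below: each user id
# maps to an LRU cache of client ids, and each client id holds a client_queue of
# recently seen connection ids.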
class obfs_auth_mu_data(object):
def __init__(self):
self.user_id = {}
self.local_client_id = b''
self.connection_id = 0
self.set_max_client(64) # max active client count
def update(self, user_id, client_id, connection_id):
if user_id not in self.user_id:
self.user_id[user_id] = lru_cache.LRUCache()
local_client_id = self.user_id[user_id]
if client_id in local_client_id:
local_client_id[client_id].update()
def set_max_client(self, max_client):
self.max_client = max_client
self.max_buffer = max(self.max_client * 2, 1024)
def insert(self, user_id, client_id, connection_id):
if user_id not in self.user_id:
self.user_id[user_id] = lru_cache.LRUCache()
local_client_id = self.user_id[user_id]
if local_client_id.get(client_id, None) is None or not local_client_id[client_id].enable:
if local_client_id.first() is None or len(local_client_id) < self.max_client:
if client_id not in local_client_id:
#TODO: check
local_client_id[client_id] = client_queue(connection_id)
else:
local_client_id[client_id].re_enable(connection_id)
return local_client_id[client_id].insert(connection_id)
if not local_client_id[local_client_id.first()].is_active():
del local_client_id[local_client_id.first()]
if client_id not in local_client_id:
#TODO: check
local_client_id[client_id] = client_queue(connection_id)
else:
local_client_id[client_id].re_enable(connection_id)
return local_client_id[client_id].insert(connection_id)
logging.warn('auth_aes128: no inactive client')
return False
else:
return local_client_id[client_id].insert(connection_id)
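# auth_aes128_sha1 / auth_aes128_md5: same framing as auth_aes128 but with a
# configurable hash (salt and method name follow the hash choice), pack_id and
# recv_id starting at 1, and per-user key state (self.user_id / self.user_key).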
class auth_aes128_sha1(auth_base):
def __init__(self, method, hashfunc):
super(auth_aes128_sha1, self).__init__(method)
self.hashfunc = hashfunc
self.recv_buf = b''
self.unit_len = 8100
self.raw_trans = False
self.has_sent_header = False
self.has_recv_header = False
self.client_id = 0
self.connection_id = 0
self.max_time_dif = 60 * 60 * 24  # max allowed clock difference between client and server, in seconds
self.salt = b"auth_aes128_md5" if hashfunc == hashlib.md5 else b"auth_aes128_sha1"
self.no_compatible_method = 'auth_aes128_md5' if hashfunc == hashlib.md5 else 'auth_aes128_sha1'
self.extra_wait_size = struct.unpack('>H', os.urandom(2))[0] % 1024
self.pack_id = 1
self.recv_id = 1
self.user_id = None
self.user_key = None
self.last_rnd_len = 0
def init_data(self):
return obfs_auth_mu_data()
def set_server_info(self, server_info):
self.server_info = server_info
try:
max_client = int(server_info.protocol_param.split('#')[0])
except:
max_client = 64
self.server_info.data.set_max_client(max_client)
def trapezoid_random_float(self, d):
if d == 0:
return random.random()
s = random.random()
a = 1 - d
return (math.sqrt(a * a + 4 * d * s) - a) / (2 * d)
import math
import numpy as np
import torch
import torch.nn as nn
from ....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules
from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils
from ....utils import common_utils
from ...backbones_2d.transformer import TransformerEncoderLayer3D, TransformerEncoder
from ...roi_heads.target_assigner.proposal_target_layer import ProposalTargetLayer
from ...model_utils.model_nms_utils import class_agnostic_nms
def bilinear_interpolate_torch(im, x, y):
"""
Args:
im: (H, W, C) [y, x]
x: (N)
y: (N)
Returns:
    ans: (N, C) features bilinearly interpolated at each (x, y) location
"""
x0 = torch.floor(x).long()
x1 = x0 + 1
y0 = torch.floor(y).long()
y1 = y0 + 1
x0 = torch.clamp(x0, 0, im.shape[1] - 1)
x1 = torch.clamp(x1, 0, im.shape[1] - 1)
y0 = torch.clamp(y0, 0, im.shape[0] - 1)
y1 = torch.clamp(y1, 0, im.shape[0] - 1)
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)
wb = (x1.type_as(x) - x) * (y - y0.type_as(y))
wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)
wd = (x - x0.type_as(x)) * (y - y0.type_as(y))
ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)
return ans
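# Example of how this is used below (see interpolate_from_bev_features): the BEV
# feature map is permuted to (H, W, C) and sampled at fractional grid indices,
#   feats = bilinear_interpolate_torch(bev_features[k].permute(1, 2, 0), x_idxs[k], y_idxs[k])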
def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000):
"""
Args:
rois: (M, 7 + C)
points: (N, 3)
sample_radius_with_roi:
num_max_points_of_part:
Returns:
sampled_points: (N_out, 3)
point_mask: (N) boolean mask over the input points
"""
if points.shape[0] < num_max_points_of_part:
distance = (points[:, None, :] - rois[None, :, 0:3]).norm(dim=-1)
min_dis, min_dis_roi_idx = distance.min(dim=-1)
roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
point_mask = min_dis < roi_max_dim + sample_radius_with_roi
else:
start_idx = 0
point_mask_list = []
while start_idx < points.shape[0]:
distance = (points[start_idx:start_idx + num_max_points_of_part, None, :] - rois[None, :, 0:3]).norm(dim=-1)
min_dis, min_dis_roi_idx = distance.min(dim=-1)
roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1)
cur_point_mask = min_dis < roi_max_dim + sample_radius_with_roi
point_mask_list.append(cur_point_mask)
start_idx += num_max_points_of_part
point_mask = torch.cat(point_mask_list, dim=0)
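# Fall back to a single point when nothing lies within any ROI's radius so that
# downstream grouping never sees an empty tensor.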
sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :]
return sampled_points, point_mask
def sector_fps(points, num_sampled_points, num_sectors):
"""
Args:
points: (N, 3)
num_sampled_points: int
num_sectors: int
Returns:
sampled_points: (N_out, 3)
"""
sector_size = np.pi * 2 / num_sectors
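# shift atan2 output from [-pi, pi) to [0, 2*pi) so sector indices start at 0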
point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi
sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors)
xyz_points_list = []
xyz_batch_cnt = []
num_sampled_points_list = []
for k in range(num_sectors):
mask = (sector_idx == k)
cur_num_points = mask.sum().item()
if cur_num_points > 0:
xyz_points_list.append(points[mask])
xyz_batch_cnt.append(cur_num_points)
ratio = cur_num_points / points.shape[0]
num_sampled_points_list.append(
min(cur_num_points, math.ceil(ratio * num_sampled_points))
)
if len(xyz_batch_cnt) == 0:
xyz_points_list.append(points)
xyz_batch_cnt.append(len(points))
num_sampled_points_list.append(num_sampled_points)
print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}')
xyz = torch.cat(xyz_points_list, dim=0)
xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int()
sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int()
sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample(
xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt
).long()
sampled_points = xyz[sampled_pt_idxs]
return sampled_points
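# Voxel Set Abstraction head that gathers keypoint features from BEV maps, raw
# points and multi-scale voxel features, projects every source to a common
# fusion width, fuses them with transformer encoders and finally compresses the
# concatenated features with an MLP (vsa_point_feature_fusion).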
class VoxelSetAbstractionTransFusionv5(nn.Module):
def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None,
num_rawpoint_features=None, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
SA_cfg = self.model_cfg.SA_LAYER
self.SA_layers = nn.ModuleList()
self.linears_in = nn.ModuleList()
self.linears_out = nn.ModuleList()
self.fusion_channel = sum([x[-1] for x in SA_cfg[self.model_cfg.FEATURES_SOURCE[-2]].MLPS])
# self.fusion_channel = 16
self.SA_layer_names = []
self.downsample_times_map = {}
c_in = 0
if 'bev' in self.model_cfg.FEATURES_SOURCE:
c_bev = num_bev_features
c_in += c_bev
if c_bev == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(c_bev, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, c_bev, bias=False),
nn.BatchNorm1d(c_bev)))
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
mlps = SA_cfg['raw_points'].MLPS
for k in range(len(mlps)):
mlps[k] = [num_rawpoint_features - 3] + mlps[k]
self.SA_rawpoints = pointnet2_stack_modules.StackSAModuleMSG(
radii=SA_cfg['raw_points'].POOL_RADIUS,
nsamples=SA_cfg['raw_points'].NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method='max_pool'
)
cur = sum([x[-1] for x in mlps])
if cur == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(cur, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, cur, bias=False),
nn.BatchNorm1d(cur)))
c_in += cur
for src_name in self.model_cfg.FEATURES_SOURCE:
if src_name in ['bev', 'raw_points']:
continue
self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR
mlps = SA_cfg[src_name].MLPS
for k in range(len(mlps)):
mlps[k] = [mlps[k][0]] + mlps[k]
cur_layer = pointnet2_stack_modules.StackSAModuleMSG(
radii=SA_cfg[src_name].POOL_RADIUS,
nsamples=SA_cfg[src_name].NSAMPLE,
mlps=mlps,
use_xyz=True,
pool_method='max_pool',
)
self.SA_layers.append(cur_layer)
cur = sum([x[-1] for x in mlps])
if cur == self.fusion_channel:
self.linears_in.append(nn.Identity())
self.linears_out.append(nn.Identity())
else:
self.linears_in.append(nn.Sequential(
nn.Linear(cur, self.fusion_channel, bias=False),
nn.BatchNorm1d(self.fusion_channel)))
self.linears_out.append(nn.Sequential(
nn.Linear(self.fusion_channel, cur, bias=False),
nn.BatchNorm1d(cur)))
self.SA_layer_names.append(src_name)
c_in += cur
self.vsa_point_feature_fusion = nn.Sequential(
nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False),
nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES),
nn.ReLU(),
)
self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES
self.num_point_features_before_fusion = c_in
if self.model_cfg.NORM:
self.transnorm = nn.LayerNorm(c_in)
else:
self.transnorm = None
if self.model_cfg.NORM2:
self.transnorm2 = nn.LayerNorm(self.fusion_channel)
else:
self.transnorm2 = None
# multi_location
self.trans_layer = TransformerEncoder(TransformerEncoderLayer3D(c_in, self.model_cfg.FUSION_HEAD), self.model_cfg.NUM_LAYERS, self.transnorm)
# have multi-modality + multi-scale
self.trans_fusion_layer = TransformerEncoder(TransformerEncoderLayer3D(self.fusion_channel, self.model_cfg.FUSION2_HEAD), self.model_cfg.NUM_LAYERS2, self.transnorm2)
self.reduce_radius = self.model_cfg.REDUCE_RADIUS**2
self.topks = self.model_cfg.NMS_CONFIG.TOPK
self.max_keypoints = self.model_cfg.NMS_CONFIG.MAX_POINTS
self.res1_actn_1 = nn.Sequential(
nn.LayerNorm(c_in),
nn.ReLU())
self.res1_actn_2 = nn.Sequential(
nn.LayerNorm(c_in),
nn.ReLU())
def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride):
x_idxs = (keypoints[:, :, 0] - self.point_cloud_range[0]) / self.voxel_size[0]
y_idxs = (keypoints[:, :, 1] - self.point_cloud_range[1]) / self.voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
cur_x_idxs = x_idxs[k]
cur_y_idxs = y_idxs[k]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features.unsqueeze(dim=0))
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (B, N, C0)
return point_bev_features
def get_sampled_points(self, batch_dict):
batch_size = batch_dict['batch_size']
if self.model_cfg.POINT_SOURCE == 'raw_points':
src_points = batch_dict['points'][:, 1:4]
batch_indices = batch_dict['points'][:, 0].long()
elif self.model_cfg.POINT_SOURCE == 'voxel_centers':
src_points = common_utils.get_voxel_centers(
batch_dict['voxel_coords'][:, 1:4],
downsample_times=1,
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_indices = batch_dict['voxel_coords'][:, 0].long()
else:
raise NotImplementedError
keypoints_list = []
for bs_idx in range(batch_size):
bs_mask = (batch_indices == bs_idx)
sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3)
if self.model_cfg.SAMPLE_METHOD == 'FPS':
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS
).long()
if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS:
empty_num = self.model_cfg.NUM_KEYPOINTS - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
elif self.model_cfg.SAMPLE_METHOD == 'FastFPS':
raise NotImplementedError
else:
raise NotImplementedError
keypoints_list.append(keypoints)
keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoints
def get_sampled_points_post(self, batch_dict, keypoints):
batch_size = batch_dict['batch_size']
src_points = keypoints
keypoints_list = []
for bs_idx in range(batch_size):
sampled_points = src_points[bs_idx].unsqueeze(dim=0) # (1, N, 3)
if sampled_points.shape[1] < self.max_keypoints:
cur_count = sampled_points.shape[1]
cur_pt_idxs = torch.arange(0, self.max_keypoints)
empty_num = self.max_keypoints - cur_count
while empty_num >= cur_count:
cur_pt_idxs[cur_count:cur_count * 2] = cur_pt_idxs[:cur_count]
empty_num -= cur_count
cur_count *= 2
if cur_count < self.max_keypoints:
assert empty_num == self.max_keypoints - cur_count
cur_pt_idxs[-empty_num:] = cur_pt_idxs[:empty_num]
keypoint = sampled_points[0][cur_pt_idxs].unsqueeze(dim=0)
else:
cur_pt_idxs = pointnet2_stack_utils.furthest_point_sample(
sampled_points[:, :, 0:3].contiguous(), self.max_keypoints
).long()
if sampled_points.shape[1] < self.max_keypoints:
empty_num = self.max_keypoints - sampled_points.shape[1]
cur_pt_idxs[0, -empty_num:] = cur_pt_idxs[0, :empty_num]
keypoint = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0)
keypoints_list.append(keypoint)
keypoint = torch.cat(keypoints_list, dim=0) # (B, M, 3)
return keypoint
def reduce_points(self, batch_dict):
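# Keep only raw points that lie within REDUCE_RADIUS of the centres of the
# top-k highest scoring predicted boxes (comparison done on squared distances).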
batch_indices = batch_dict['points'][:, 0].long()
masks = []
for bs_idx, roi in enumerate(batch_dict['batch_cls_preds']):
bs_mask = (batch_indices == bs_idx)
pts = batch_dict['points'][bs_mask].unsqueeze(dim=1)[:, :, 1: 4] # (N, 1, 3)
s, _ = torch.max(batch_dict['batch_cls_preds'][bs_idx], dim=1)
top, idx = torch.topk(s, self.topks)
c = batch_dict['batch_box_preds'][bs_idx][idx][:, :3].unsqueeze(dim=0)
dist = (pts - c)**2
dist, _ = dist.sum(dim=-1).min(dim=1)
mask = (dist <= self.reduce_radius)
masks.extend(mask)
batch_dict['points'] = batch_dict['points'][masks]
return batch_dict
def reduce_points_post(self, keypoints, batch_dict):
keypoints_list = []
for bs_idx, roi in enumerate(batch_dict['batch_cls_preds']):
pts = keypoints[bs_idx].unsqueeze(dim=1)
s, _ = torch.max(batch_dict['batch_cls_preds'][bs_idx], dim=1)
top, idx = torch.topk(s, self.topks)
c = batch_dict['batch_box_preds'][bs_idx][idx][:, :3].unsqueeze(dim=0)
dist = (pts - c)**2
dist, _ = dist.sum(dim=-1).min(dim=1)
mask = (dist <= self.reduce_radius)
keypoints_list.append(keypoints[bs_idx][mask])
return keypoints_list
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size:
keypoints: (B, num_keypoints, 3)
multi_scale_3d_features: {
'x_conv4': ...
}
points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...]
spatial_features: optional
spatial_features_stride: optional
Returns:
point_features: (N, C)
point_coords: (N, 4)
"""
if self.model_cfg.POINT_SOURCE == 'raw_points' and self.reduce_radius > 0:
# batch_dict = self.reduce_points(batch_dict)
keypoints = self.get_sampled_points(batch_dict)
keypoint_lst = self.reduce_points_post(keypoints, batch_dict)
keypoints = self.get_sampled_points_post(batch_dict, keypoint_lst)
else:
keypoints = self.get_sampled_points(batch_dict)
point_features_list = []
if 'bev' in self.model_cfg.FEATURES_SOURCE:
point_bev_features = self.interpolate_from_bev_features(
keypoints, batch_dict['spatial_features'], batch_dict['batch_size'],
bev_stride=batch_dict['spatial_features_stride']
)
point_features_list.append(point_bev_features)
batch_size, num_keypoints, _ = keypoints.shape
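# flatten keypoints to the stacked (B*M, 3) layout expected by the stack-based SA modules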
new_xyz = keypoints.view(-1, 3)
new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int().fill_(num_keypoints)
if 'raw_points' in self.model_cfg.FEATURES_SOURCE:
raw_points = batch_dict['points']
xyz = raw_points[:, 1:4]
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (raw_points[:, 0] == bs_idx).sum()
point_features = raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None
pooled_points, pooled_features = self.SA_rawpoints(
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=point_features,
)
point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
for k, src_name in enumerate(self.SA_layer_names):
cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices
xyz = common_utils.get_voxel_centers(
cur_coords[:, 1:4],
downsample_times=self.downsample_times_map[src_name],
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
xyz_batch_cnt = xyz.new_zeros(batch_size).int()
for bs_idx in range(batch_size):
xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum()
pooled_points, pooled_features = self.SA_layers[k](
xyz=xyz.contiguous(),
xyz_batch_cnt=xyz_batch_cnt,
new_xyz=new_xyz,
new_xyz_batch_cnt=new_xyz_batch_cnt,
features=batch_dict['multi_scale_3d_features'][src_name].features.contiguous(),
)
point_features_list.append(pooled_features.view(batch_size, num_keypoints, -1))
point_features_list_new = []
for i, x in enumerate(point_features_list):
feat = self.linears_in[i](x.view(batch_size * num_keypoints, -1))
point_features_list_new.append(feat.view(1, batch_size * num_keypoints, -1))
fusion_feat = torch.cat(point_features_list_new, dim=0)
# have multi-modality + multi-scale
trans1_feat_list = self.trans_fusion_layer(fusion_feat).view(len(fusion_feat), batch_size, num_keypoints, -1)
trans1_feat_projected_list = []
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class extended_data(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-ssm-operational - based on the path /acl-state/vxlan-acl/extended-data. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__acl_name','__seq_num','__permit_deny','__dst_vtep_ip','__src_vtep_ip','__vni','__vni_mask','__native_tag','__dst_ip','__dst_ip_mask','__src_ip','__src_ip_mask','__dst_port','__src_port','__count','__byte_count','__transit_name','__sflow','__redir_interface','__mirror_interface',)
_yang_name = 'extended-data'
_rest_name = 'extended-data'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="count", rest_name="count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint64', is_config=False)
self.__sflow = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="sflow", rest_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='boolean', is_config=False)
self.__src_port = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-port", rest_name="src-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
self.__native_tag = YANGDynClass(base=unicode, is_leaf=True, yang_name="native-tag", rest_name="native-tag", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__acl_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="acl-name", rest_name="acl-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__seq_num = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seq-num", rest_name="seq-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint32', is_config=False)
self.__vni_mask = YANGDynClass(base=unicode, is_leaf=True, yang_name="vni-mask", rest_name="vni-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__redir_interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="redir-interface", rest_name="redir-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__vni = YANGDynClass(base=unicode, is_leaf=True, yang_name="vni", rest_name="vni", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__byte_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="byte-count", rest_name="byte-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint64', is_config=False)
self.__src_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-ip", rest_name="src-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
self.__permit_deny = YANGDynClass(base=unicode, is_leaf=True, yang_name="permit-deny", rest_name="permit-deny", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__dst_port = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="dst-port", rest_name="dst-port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
self.__dst_ip_mask = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="dst-ip-mask", rest_name="dst-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
self.__dst_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-ip", rest_name="dst-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
self.__src_ip_mask = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="src-ip-mask", rest_name="src-ip-mask", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint16', is_config=False)
self.__src_vtep_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-vtep-ip", rest_name="src-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
self.__dst_vtep_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
self.__mirror_interface = YANGDynClass(base=unicode, is_leaf=True, yang_name="mirror-interface", rest_name="mirror-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
self.__transit_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="transit-name", rest_name="transit-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'acl-state', u'vxlan-acl', u'extended-data']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'acl-state', u'vxlan-acl', u'extended-data']
def _get_acl_name(self):
"""
Getter method for acl_name, mapped from YANG variable /acl_state/vxlan_acl/extended_data/acl_name (string)
YANG Description: input_Acl_name
"""
return self.__acl_name
def _set_acl_name(self, v, load=False):
"""
Setter method for acl_name, mapped from YANG variable /acl_state/vxlan_acl/extended_data/acl_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_acl_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_acl_name() directly.
YANG Description: input_Acl_name
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="acl-name", rest_name="acl-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """acl_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="acl-name", rest_name="acl-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)""",
})
self.__acl_name = t
if hasattr(self, '_set'):
self._set()
def _unset_acl_name(self):
self.__acl_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="acl-name", rest_name="acl-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
def _get_seq_num(self):
"""
Getter method for seq_num, mapped from YANG variable /acl_state/vxlan_acl/extended_data/seq_num (uint32)
YANG Description: sequence number
"""
return self.__seq_num
def _set_seq_num(self, v, load=False):
"""
Setter method for seq_num, mapped from YANG variable /acl_state/vxlan_acl/extended_data/seq_num (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_seq_num is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_seq_num() directly.
YANG Description: sequence number
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seq-num", rest_name="seq-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """seq_num must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seq-num", rest_name="seq-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint32', is_config=False)""",
})
self.__seq_num = t
if hasattr(self, '_set'):
self._set()
def _unset_seq_num(self):
self.__seq_num = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="seq-num", rest_name="seq-num", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='uint32', is_config=False)
def _get_permit_deny(self):
"""
Getter method for permit_deny, mapped from YANG variable /acl_state/vxlan_acl/extended_data/permit_deny (string)
YANG Description: permit or deny
"""
return self.__permit_deny
def _set_permit_deny(self, v, load=False):
"""
Setter method for permit_deny, mapped from YANG variable /acl_state/vxlan_acl/extended_data/permit_deny (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_permit_deny is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_permit_deny() directly.
YANG Description: permit or deny
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="permit-deny", rest_name="permit-deny", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """permit_deny must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="permit-deny", rest_name="permit-deny", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)""",
})
self.__permit_deny = t
if hasattr(self, '_set'):
self._set()
def _unset_permit_deny(self):
self.__permit_deny = YANGDynClass(base=unicode, is_leaf=True, yang_name="permit-deny", rest_name="permit-deny", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='string', is_config=False)
def _get_dst_vtep_ip(self):
"""
Getter method for dst_vtep_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/dst_vtep_ip (inet:ipv4-address)
YANG Description: dst vtep ip or any
"""
return self.__dst_vtep_ip
def _set_dst_vtep_ip(self, v, load=False):
"""
Setter method for dst_vtep_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/dst_vtep_ip (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_dst_vtep_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dst_vtep_ip() directly.
YANG Description: dst vtep ip or any
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dst_vtep_ip must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)""",
})
self.__dst_vtep_ip = t
if hasattr(self, '_set'):
self._set()
def _unset_dst_vtep_ip(self):
self.__dst_vtep_ip = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="dst-vtep-ip", rest_name="dst-vtep-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-ssm-operational', defining_module='brocade-ssm-operational', yang_type='inet:ipv4-address', is_config=False)
def _get_src_vtep_ip(self):
"""
Getter method for src_vtep_ip, mapped from YANG variable /acl_state/vxlan_acl/extended_data/src_vtep_ip (inet:ipv4-address)
YANG Description: src vtep ip or any
"""
return self.__src_vtep_ip
def _set_src_vtep_ip(self, v, load=False):
"""
Setter | |
"""
Models and functions for tracking XBlog Objects:
- Post
- Author
- Blog
- Category
- Tag
"""
import logging
import os
import string
import bs4
import markdown2
import django.utils.timezone
import random
try:
from urllib.parse import urlparse, urljoin
except ImportError:
from urlparse import urlparse, urljoin
from django.db import models
# from django.core.mail import send_mail
from django.core.exceptions import PermissionDenied
from django.core.validators import MinLengthValidator
from django.utils.text import Truncator
from django.utils.html import linebreaks
from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.sites.managers import CurrentSiteManager
from django.forms import ModelForm
from django.db.models.signals import post_save
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
# from django.contrib.auth import get_user_model
try:
from django.urls import reverse
except ImportError: # django < 2
from django.core.urlresolvers import reverse
from .external.postutils import SlugifyUniquely
from .external import fuzzyclock
from .external import text_stats
LOGGER = logging.getLogger(__name__)
def create_profile(*args, **kwargs):
"""
Creates a user profile for new users
assigns an author instance
"""
LOGGER.debug('%s.create_profile entered', __name__)
LOGGER.debug('args: %s', str(args))
user = kwargs["instance"]
# if kwargs["created"]:
# # check if the profile already exists
if hasattr(user, 'author'):
LOGGER.info('Author profile exists, skipping')
return
else:
userprofile = Author(user=user)
userprofile.save()
post_save.connect(create_profile, sender=settings.AUTH_USER_MODEL)
def random_string(length=24):
"""
generates a random string of characters for
an API key, for example.
"""
    # Pool of A-Z, a-z and 0-9 (equivalent to string.ascii_letters + string.digits).
    pool = list(string.ascii_uppercase) + list(string.ascii_lowercase) + [str(i) for i in range(0, 10)]
res = ''.join(random.choice(pool) for _ in range(length))
return res
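# Illustrative sketch (not part of the original module): random_string() is the
# helper Author.save() uses below to seed remote_access_key; a hypothetical
# wrapper that mints a key of a chosen length looks like this.
def example_generate_api_key(length=32):
    """Return a fresh API key of ``length`` alphanumeric characters."""
    return random_string(length)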
STATUS_CHOICES = (('draft', 'Draft'), ('publish', 'Published'), ('private', 'Private'))
FORMAT_CHOICES = (('standard', 'Standard'), ('video', 'Video'), ('status', 'Status'),)
# text FILTERS
FILTER_CHOICES = (
('markdown', 'Markdown'),
('html', 'HTML'),
('convert linebreaks', 'Convert linebreaks')
)
FILTERS = {}
def get_markdown(data):
"""
    Render Markdown source to HTML using markdown2, with the footnotes,
    fenced-code-blocks and smartypants extras enabled.
"""
LOGGER.debug("%s.get_markdown entered", __name__)
res = markdown2.markdown(data, extras=['footnotes', 'fenced-code-blocks', 'smartypants'])
# LOGGER.debug("res: %s" % res)
return res
FILTERS['markdown'] = get_markdown
def get_html(data):
"""
used when the post is written in standard HTML
might be a good place to clean up / validate HTML to
keep it from breaking the site..?
"""
LOGGER.debug("%s.get_html entered", __name__)
# just return it.
# maybe tidy it up or something...
# data = smartyPants(data, "1qb")
return data
FILTERS['html'] = get_html
def convert_linebreaks(data):
"""
The most basic filter, just translates linebreaks to
<br>'s. This is pants.
"""
LOGGER.debug("%s.convert_linebreaks entered", __name__)
data = linebreaks(data)
# return smartyPants(data,"1qb")
return data
FILTERS['convert linebreaks'] = convert_linebreaks
FILTERS['__default__'] = get_markdown
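# Illustrative sketch (an assumption, not part of the original module): the
# FILTERS registry maps a Post.text_filter value to its rendering callable.
# This hypothetical helper shows the intended lookup-and-render pattern.
def example_render_body(body, text_filter='__default__'):
    """Render ``body`` to HTML with the filter registered for ``text_filter``,
    falling back to the default Markdown renderer."""
    renderer = FILTERS.get(text_filter, FILTERS['__default__'])
    return renderer(body)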
@python_2_unicode_compatible
class LinkCategory(models.Model):
"""Categories for the blogroll"""
title = models.CharField(blank=True, max_length=255)
description = models.TextField(blank=True)
visible = models.BooleanField(default=True)
blog = models.ForeignKey('Blog', on_delete=models.CASCADE)
display_order = models.IntegerField(blank=True, null=True)
def __str__(self):
if self.title != '':
return str(self.title)
else:
return str('Untitled Link Category %d' % self.id)
__repr__ = __str__
@python_2_unicode_compatible
class Link(models.Model):
"""Blogroll Struct"""
url = models.URLField(blank=True)
link_name = models.CharField(blank=True, max_length=255)
link_image = models.ImageField(upload_to="blog_uploads/links/",
height_field='link_image_height',
width_field='link_image_width',
blank=True)
link_image_height = models.IntegerField(blank=True, null=True)
link_image_width = models.IntegerField(blank=True, null=True)
description = models.TextField(blank=True)
visible = models.BooleanField(default=True)
blog = models.ForeignKey('Blog', on_delete=models.CASCADE)
rss = models.URLField(blank=True)
category = models.ForeignKey('LinkCategory', on_delete=models.CASCADE)
def __str__(self):
return "%s (%s)" % (self.link_name, self.url)
__repr__ = __str__
@python_2_unicode_compatible
class Pingback(models.Model):
""" Replies are either pingbacks """
author_name = models.CharField(blank=True, max_length=100)
author_email = models.EmailField(blank=True)
post = models.ForeignKey('Post', on_delete=models.CASCADE)
title = models.CharField(blank=True, max_length=255)
body = models.TextField(blank=True)
is_public = models.BooleanField(default=False)
source_url = models.URLField(blank=True)
target_url = models.URLField(blank=True)
pub_date = models.DateTimeField(blank=True, default=django.utils.timezone.now)
mod_date = models.DateTimeField(blank=True, default=django.utils.timezone.now)
def __str__(self):
return "Reply %s -> %s" % (self.source_url, self.target_url)
__unicode__ = __str__
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
save override.
"""
LOGGER.debug("Pingback.save() entered: %s", str(self))
super(Pingback, self).save(force_insert=force_insert,
force_update=force_update, using=using,
update_fields=update_fields
)
mail_subject = "New Pingback from %s" % self.title
mail_body = """
Source URL: %s
Target URL: %s
Time: %s
""" % (self.source_url, self.target_url, self.pub_date)
LOGGER.debug('mail_subject: %s', mail_subject)
LOGGER.debug('mail_body: %s', mail_body)
# mail_managers(mail_subject, mail_body, fail_silently=False)
# send_mail(mail_subject, mail_body, "<EMAIL>", [self.post.author.email])
class Tag(models.Model):
"""(Tag description)"""
title = models.CharField(blank=True, max_length=100)
def __str__(self):
# return "%s (%s - %s)" % (self.title, self.source_url, self.target_url)
return self.title
__unicode__ = __str__
@python_2_unicode_compatible
class Author(models.Model):
"""User guy"""
fullname = models.CharField(blank=True, max_length=100)
url = models.URLField(blank=True)
avatar = models.ImageField(blank=True,
upload_to="avatars",
height_field='avatar_height',
width_field='avatar_width')
# user = models.ForeignKey(User, unique=True)
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
about = models.TextField(blank=True)
avatar_height = models.IntegerField(blank=True, null=True)
avatar_width = models.IntegerField(blank=True, null=True)
# API-related stuff
remote_access_enabled = models.BooleanField(default=False)
remote_access_key = models.CharField(blank=True,
max_length=100,
validators=[MinLengthValidator(8)])
def get_avatar_url(self):
"""
returns the avatar URL for this user
"""
LOGGER.debug("%s: %s", str(self), "Getting avatar url")
return self.avatar.url
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
special instructions on save
"""
if self.id:
if self.remote_access_enabled:
if not self.remote_access_key:
self.remote_access_key = random_string()
super(Author, self).save(force_insert=force_insert,
force_update=force_update,
using=using,
update_fields=update_fields
)
def __str__(self):
if self.fullname == '':
return str(self.user)
return self.fullname
def get_fullname(self):
"""
get_fullname will return something, even if fullname isn't set
"""
return str(self)
class Category(models.Model):
"""
Keeps track of post categories
"""
title = models.CharField(blank=False, max_length=255)
description = models.CharField(blank=True, max_length=100)
blog = models.ForeignKey('Blog', on_delete=models.CASCADE)
slug = models.SlugField(max_length=100)
def __unicode__(self):
return self.title
def get_absolute_url(self, absolute=False):
"""
setting absolute will prepened host's URL
"""
local_url = urljoin(self.blog.get_absolute_url(), self.slug)
        # make sure the category URL always ends with a trailing slash
if local_url[-1] != "/":
local_url = local_url + "/"
if absolute:
return "http://%s" % self.blog.site.domain + local_url
return local_url
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Override save for Category
"""
if not self.slug or self.slug == '':
self.slug = SlugifyUniquely(self.title, self.__class__)
LOGGER.debug("%s.Category.save entered %s", __name__, self.title)
super(Category, self).save(force_insert=force_insert,
force_update=force_update,
using=using,
update_fields=update_fields
)
LOGGER.debug("category.save complete")
def __str__(self):
if self.title != '':
return str(self.title)
else:
return super(Category, self).__str__()
@python_2_unicode_compatible
class Post(models.Model):
"""A Blog Entry, natch"""
# metadata
pub_date = models.DateTimeField(blank=True, default=django.utils.timezone.now)
update_date = models.DateTimeField(blank=True, auto_now=True)
create_date = models.DateTimeField(blank=True, auto_now_add=True)
enable_comments = models.BooleanField(default=True)
# post content
title = models.CharField(blank=False, max_length=255)
slug = models.SlugField(max_length=100)
# adding, because it's a good idea (mainly for importing!)
guid = models.CharField(blank=True, max_length=255)
body = models.TextField(blank=True)
summary = models.TextField(blank=True)
categories = models.ManyToManyField(Category)
primary_category_name = models.ForeignKey(Category,
related_name='primary_category_set',
blank=True,
on_delete=models.CASCADE,
null=True)
tags = models.ManyToManyField(Tag, blank=True)
blog = models.ForeignKey('Blog', on_delete=models.CASCADE)
# author = models.ForeignKey(User)
author = models.ForeignKey('Author', on_delete=models.CASCADE)
status = models.CharField(blank=True,
null=True,
max_length=32,
choices=STATUS_CHOICES,
                              default='draft')  # must be a STATUS_CHOICES value, not its display label
# filter to display when "get_formatted_body" is called.
text_filter = models.CharField(blank=True,
max_length=100,
choices=FILTER_CHOICES,
default='__default__')
# format of this post
post_format = models.CharField(blank=True,
max_length=100,
choices=FORMAT_CHOICES,
default='standard')
def __str__(self):
return self.title
def comment_period_open(self):
""" determines if a post is too old..."""
# uncomment rest of line to set limit at 30 days.
# Sometimes I get hits on older entries, so I'll leave this one for now.
# consider adding:
# # and datetime.datetime.today() - datetime.timedelta(30) <= self.pub_date
return self.enable_comments
def prepopulate(self):
"""
sets up slug, etc.
"""
LOGGER.debug("prepopulate entered for %s", str(self))
if not self.slug or self.slug == '':
self.slug = SlugifyUniquely(self.title, self.__class__)
if not self.summary or self.summary == '':
# do an auto-summarize here.
# now what could that be?
pass
if not self.guid or self.guid == '':
self.guid = self.get_absolute_url()
def handle_technorati_tags(self):
"""
takes the post, and returns the technorati links in them...
from ecto:
"""
LOGGER.debug("handle_technorati_tags entered for %s", str(self))
start_tag = "<!-- technorati tags start -->"
end_tag = "<!-- technorati tags end -->"
text = self.body
        start_idx = text.find(start_tag)
        end_idx = text.find(end_tag)
        if start_idx == -1 or end_idx == -1:
            # no technorati tag block in this post
            return
        # move past the opening marker so the slice contains only the tag HTML
        start_idx += len(start_tag)
logging.debug("Got target text: starts at %s", str(start_idx))
logging.debug("Ends at %s", str(end_idx))
logging.debug("Got: %s", text[start_idx:end_idx])
soup = bs4.BeautifulSoup(text, 'html.parser')
tags = []
for anchor in soup.findAll('a'):
if "http://www.technorati.com/tag/" in anchor.get('href'):
# seems to be taggy
tags.append(anchor.string)
LOGGER.debug("Tags: %s", str(tags))
taglist = []
for tag in tags:
# try to find the tag
try:
taginstance = Tag.objects.get(title__iexact=tag)
LOGGER.info("Got Tag: '%s'", taginstance)
except Tag.DoesNotExist:
# not found, create tag
LOGGER.info("Creating '%s'", tag)
taginstance = Tag(title=tag)
taginstance.save()
taglist.append(taginstance)
        # direct assignment to a ManyToMany field was removed in Django 2.0;
        # use the related manager instead
        self.tags.set(taglist)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
save override for Post model
"""
LOGGER.debug("Post.save entered for %s", str(self))
# make sure that person is allowed to create posts in this blog
if self.author.user != self.blog.owner and not self.author.user.is_superuser:
# print self.author.user
# print self.blog.owner
raise PermissionDenied
if not self.slug or self.slug == '':
self.slug = SlugifyUniquely(self.title, self.__class__)
trunc = Truncator(FILTERS.get(self.text_filter,
convert_linebreaks)(self.body)).chars(50, html=True)
self.summary = trunc
# finally, save the whole thing
super(Post, self).save(force_insert=force_insert,
force_update=force_update,
using=using,
update_fields=update_fields
)
LOGGER.debug("Post.save complete")
def get_archive_url(self):
"""
gets the URL of the main | |
# Sipros/Scripts/sipros_ensemble_filtering.py
'''
Created on Sep 7, 2016
@author: xgo
'''
import getopt, sys, os
import numpy as np
import csv
import math
import re
try:
from sets import Set
except ImportError:
pass
from datetime import datetime, date, time
from collections import namedtuple
from sklearn import linear_model
from sklearn import preprocessing
from subprocess import call
from multiprocessing import Process
from multiprocessing import Queue, cpu_count
## Import Sipros package modules
import sipros_post_module
import sipros_peptides_assembling
import parseconfig
## Returns the current time in a nice format
curr_time = sipros_post_module.curr_time
## Format time as a pretty string
format_time = sipros_post_module.format_time
## get the file extension
get_file_list_with_ext = sipros_post_module.get_file_list_with_ext
## Class for ignoring comments '#' in sipros file
CommentedFile = sipros_post_module.CommentedFile
## global variables
## training prefix
train_str = ''
## testing prefix
test_str = ''
## reserved prefix
reserve_str = ''
## ratio of testing decoy vs forward
Test_Fwd_Ratio = 1
## maximum precurer mass windows
mass_window_max_int = 0
## feature list
feature_selection_list = []
## fwd psm value
LabelFwd = 1
## training psm value
LabelTrain = 2
## testing psm value
LabelTest = 3
## reserved psm value
LabelReserve = 4
## sip mode training psm value
LabelSipTrainFwd = 1
## forward psms
num_forward_psms_before_filtering = 0
## protein database size
num_proteins = 0
## Class for the PSM fields parsed from the tab-delimited input (PsmFields4)
class PsmFields4(namedtuple('PsmFields',
['FileName', # 0
'ScanNumber', # 1
'ParentCharge', # 2
'MeasuredParentMass', # 3
'ScanType', # 4
'SearchName', # 5
'IdentifiedPeptide', # 6
'OriginalPeptide', # 7
'CalculatedParentMass', # 8
'MVH', # 9
'Xcorr', # 10
'WDP', # 11
'ProteinNames', # 12
'DiffMVH',
'DiffXcorr',
'DiffWDP',
'RetentionTime',
                             'DeltaP'])):  # 17
def __init__(self):
self.data = self
## save filename in this list, for saving memory
filename_list = []
## get the filename index
def get_set_filename(filename):
if filename in filename_list:
return filename_list.index(filename)
else:
filename_list.append(filename)
return filename_list.index(filename)
## save scantype in this list, for saving memory
scantype_list = []
## get the scan type index
def get_set_scantype(scantype):
if scantype in scantype_list:
return scantype_list.index(scantype)
else:
scantype_list.append(scantype)
return scantype_list.index(scantype)
## save search name in this list, for saving memory
searchname_list = []
## get the index for given search name
def get_set_searchname(searchname):
if searchname in searchname_list:
return searchname_list.index(searchname)
else:
searchname_list.append(searchname)
return searchname_list.index(searchname)
## get the percentage
def get_percentage_back(psm_list):
l = []
for e in searchname_list:
begin_i = e.index('_')
end_i = e.index('Pct')
pct_s = e[begin_i+1:end_i]
l.append(pct_s)
for psm_o in psm_list:
psm_o.pct_s = l[psm_o.SearchName]
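# Illustrative sketch (not part of the original script): the get_set_* helpers
# intern repeated strings as list indices to save memory across millions of
# PSM rows; interning the same name twice yields the same index.
def example_intern_filename(name='demo_run.FT2'):
    """Return True: both calls resolve to one shared index for ``name``."""
    return get_set_filename(name) == get_set_filename(name)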
## class defining the psm
class PSM:
# number of scores
iNumScores = 3
# Neutron mass
fNeutronMass = 1.00867108694132
# pattern for getting original peptides
pattern = re.compile('[^\w\[\]]')
def __init__(self, psm_field):
self.FileName = get_set_filename(psm_field.FileName)
self.ScanNumber = int(psm_field.ScanNumber)
self.ParentCharge = int(psm_field.ParentCharge)
self.ScanType = get_set_scantype(psm_field.ScanType)
self.SearchName = get_set_searchname(psm_field.SearchName)
self.lfScores = [float(psm_field.MVH), float(psm_field.Xcorr), float(psm_field.WDP)]
self.ProteinNames = psm_field.ProteinNames.strip()
self.IdentifiedPeptide = psm_field.IdentifiedPeptide
s1 = ''.join([char if char.isalnum() else '$' for char in self.IdentifiedPeptide ])
        self.PTMscore = s1.count('$') - 2  # subtract the two peptide-delimiter brackets
self.OriginalPeptide = psm_field.OriginalPeptide
self.OriginalPeptide = PSM.pattern.sub('', self.IdentifiedPeptide)
self.protein_list = []
self.RealLabel = get_protein_type(self.ProteinNames, self.protein_list)
self.fPredictProbability = 0.0
self.fMassDiff = 0.0
self.dM = 0.0
self.MeasuredParentMass = float(psm_field.MeasuredParentMass)
self.CalculatedParentMass = float(psm_field.CalculatedParentMass)
self.set_mass_diff(self.MeasuredParentMass, self.CalculatedParentMass)
self.score_differential_list = []
self.iLocalRank = 0
self.DeltaP = 'NA'
if type(psm_field).__name__ == 'PsmFields4':
self.score_differential_list = [float(psm_field.DiffMVH), float(psm_field.DiffXcorr), float(psm_field.DiffWDP)]
self.DeltaP = psm_field.DeltaP
else:
print('not support input format.')
sys.exit(1)
self.NMC = 0
self.IPSC = 0
self.OPSC = 0
self.UPSC = 0 # unique peptide
self.SPSC = 0 # shared peptide
self.NRS = 0
self.PPC = 0
self.UPPC = 0
self.SPPC = 0
self.feature_list = []
self.TrainingLabel = 0
self.pct_s = ''
# extract 10 features
def get_feature_final_list(self):
del self.feature_list[:]
self.feature_list.extend(self.lfScores) # 2, 3, 4: 1, 2, 3
self.feature_list.append(abs(self.fMassDiff)) # 6: 5
self.feature_list.extend(self.score_differential_list) # 7 - 24: 6 - 23
self.feature_list.append(self.NMC) # 25: 24
self.feature_list.append((self.OPSC)) # 27: 26
self.feature_list.append((self.SPSC)) # 29: 28
'''
def get_feature_list(self):
del self.feature_list[:]
self.feature_list.append(self.ParentCharge) # 1: 0
self.feature_list.extend(self.lfScores) # 2, 3, 4: 1, 2, 3
self.feature_list.append(self.ScoreAgreement) # 5: 4
self.feature_list.append(abs(self.fMassDiff)) # 6: 5
self.feature_list.extend(self.score_differential_list) # 7 - 24: 6 - 23
self.feature_list.append(self.NMC) # 25: 24
self.feature_list.append((self.IPSC)) # 26: 25
self.feature_list.append((self.OPSC)) # 27: 26
self.feature_list.append((self.UPSC)) # 28: 27
self.feature_list.append((self.SPSC)) # 29: 28
self.feature_list.append(abs(self.iMassWindow)) # 30: 29
self.feature_list.append((self.PPC)) # 31: 30
for c in ptm_selection_list:
self.feature_list.append(self.IdentifiedPeptide.count(ptm_str[c])) # 32: 31
'''
# put proteins inside {}
def set_protein_names(self):
self.ProteinNames = '{' + ','.join(self.protein_list) + '}'
# add protein to psm, in case some protein missing
def add_protein(self, protein_l):
add_bool = False
for p in protein_l:
if p not in self.protein_list:
add_bool = True
self.protein_list.append(p)
if add_bool:
self.set_protein_names()
# get the mass difference, considering mass windows
def set_mass_diff(self, measured_mass, calculated_mass):
fDiff = calculated_mass - measured_mass
fTemp = fDiff
        fCeil = 0
        fFloor = 0
if fDiff >= 0:
fDiff = fTemp
fCeil = math.ceil(fTemp)*PSM.fNeutronMass
fFloor = math.floor(fTemp)*PSM.fNeutronMass
if fFloor > fTemp:
fFloor -= PSM.fNeutronMass
if fCeil - PSM.fNeutronMass > fTemp:
fCeil -= PSM.fNeutronMass
if fTemp > fCeil - fTemp:
fTemp = fCeil - fTemp
if fDiff > fDiff - fFloor:
fDiff = abs(fDiff - fFloor)
if abs(fTemp) < abs(fDiff):
fDiff = fTemp
self.dM = -fTemp
else:
self.dM = fDiff
else:
fCeil = math.ceil(fDiff)*PSM.fNeutronMass
if fCeil < fDiff:
fCeil += PSM.fNeutronMass
fFloor = math.floor(fDiff)*PSM.fNeutronMass
if fFloor + PSM.fNeutronMass < fDiff:
fFloor += PSM.fNeutronMass
fDiff = fTemp
if abs(fTemp) > fCeil - fTemp:
fTemp = fCeil - fTemp
if abs(fDiff) > fDiff - fFloor:
fDiff = fDiff - fFloor
fTemp = abs(fTemp)
fDiff = abs(fDiff)
if fTemp < fDiff:
fDiff = fTemp
self.dM = -fTemp
else:
self.dM = fDiff
self.fMassDiff = fDiff
# remove training proteins and reserved proteins
def clean_protein_name(self):
self.ProteinNames = ""
l = []
if not reserve_str == "":
for sProtein in self.protein_list:
sProtein.strip()
if not (sProtein.startswith(reserve_str)):
l.append(sProtein)
self.protein_list = l
l = []
for sProtein in self.protein_list:
sProtein.strip()
if train_str == "":
if sProtein not in l:
l.append(sProtein)
elif not (sProtein.startswith(train_str)):
if sProtein not in l:
l.append(sProtein)
self.ProteinNames = '{'+','.join(l) + '}'
self.protein_list = l
# sip mode
def set_real_label(self):
self.RealLabel = get_protein_type(self.ProteinNames, self.protein_list)
# # Version control
def get_version():
return "Sipros Ensemble 1.0.1 (Alpha)"
# # Help message
help_message = '''
Usage:
python sipros_ensemble_filtering.py [options]
Inputs:
-i PSM.tab
-c Sipros Ensemble configuration file
Options:
-h show help info
-v show version info
Outputs:
-o output directory
'''
# # Parse options
def parse_options(argv):
try:
opts, _args = getopt.getopt(argv[1:], "hvi:c:o:x:")
except getopt.GetoptError:
        print("illegal option(s)")
print(help_message)
sys.exit(0)
# Default working dir and config file
input_file = ""
output_folder = ""
config_file = ""
debug_code = ""
# Basic options
for option, value in opts:
if option in ("-h", "--help"):
print(help_message)
sys.exit(0)
elif option in ("-v", "-V", "--version"):
print("{} version {}".format(__file__, get_version()))
sys.exit(0)
elif option in ("-i"):
input_file = value
elif option in ("-o"):
output_folder = value
elif option in ("-c"):
config_file = value
elif option in ("-x"):
debug_code = value
if input_file == "" or output_folder == "" or config_file == '':
print(help_message)
sys.exit(0)
output_folder = os.path.join(output_folder, '')
return (input_file, config_file, output_folder, debug_code)
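# Usage sketch (hypothetical file names, not part of the original script):
# parse_options() requires -i, -c and -o and prints the help text otherwise.
def example_parse_arguments():
    """Parse a typical command line and return (input, config, output, debug)."""
    argv = ['sipros_ensemble_filtering.py',
            '-i', 'sample.psm.tab',
            '-c', 'SiprosEnsembleConfig.cfg',
            '-o', './filtered/']
    return parse_options(argv)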
# # Decoy Reverse Forward protein
def protein_type(protein_sequence, lProtein=None):
sProteins = protein_sequence.replace('{', '')
sProteins = sProteins.replace('}', '')
asProteins = sProteins.split(',')
if lProtein != None:
del lProtein[:]
for sProtein in asProteins:
sProtein = sProtein.strip()
if sProtein not in lProtein:
lProtein.append(sProtein)
if reserve_str != '':
if train_str != '':
for sProtein in asProteins:
if not (sProtein.startswith(train_str) or sProtein.startswith(test_str) or sProtein.startswith(reserve_str)):
return LabelFwd
else:
for sProtein in asProteins:
if not (sProtein.startswith(test_str) or sProtein.startswith(reserve_str)):
return LabelFwd
else:
if train_str != '':
for sProtein in asProteins:
if not (sProtein.startswith(train_str) or sProtein.startswith(test_str)):
return LabelFwd
else:
for sProtein in asProteins:
if not (sProtein.startswith(test_str)):
return LabelFwd
if test_str != '':
for sProtein in asProteins:
if sProtein.startswith(test_str):
return LabelTest
if reserve_str != '':
for sProtein in asProteins:
if sProtein.startswith(reserve_str):
return LabelReserve
return LabelTrain
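# Illustrative sketch (not part of the original script): protein_type()
# classifies a '{...}' ProteinNames field against the configured prefixes.
def example_classify_protein_field():
    """With a hypothetical 'Rev_' testing-decoy prefix, a field containing at
    least one non-decoy protein is labelled LabelFwd."""
    global test_str
    test_str = 'Rev_'          # hypothetical decoy prefix taken from the config
    label = protein_type('{P12345,Rev_P12345}')
    test_str = ''              # restore the module default
    return label == LabelFwd   # True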
def get_protein_type(protein_sequence, lProtein=None):
"""
get the protein type
if all reserved type, return LabelReserve
if all testing type, return LabelTest
if all training type, return LabelTrain
otherwise, it is forward protein, return LabelFwd
"""
sProteins = protein_sequence.replace('{', '')
sProteins = sProteins.replace('}', '')
asProteins = sProteins.split(',')
if lProtein != None:
del lProtein[:]
for sProtein in asProteins:
sProtein = sProtein.strip()
if sProtein not in lProtein:
lProtein.append(sProtein)
protein_list_tmp_1 = []
protein_list_tmp_2 = []
reserve_type = True
if reserve_str != '':
for sProtein in asProteins:
if not sProtein.startswith(reserve_str):
protein_list_tmp_1.append(sProtein)
reserve_type = False
if reserve_type:
return LabelReserve
else:
protein_list_tmp_1.extend(asProteins)
training_type = True
if | |
#!/usr/bin/python
# Repository: elbaum/phys
from unit_error import UnitError
from unit_error_types import UnitErrorTypes
from tree_walker import TreeWalker
from symbol_helper import SymbolHelper
import cps_constraints as con
import os.path
import re  # needed by get_file_URI_where_error_occured()
from operator import itemgetter
import copy
import cppcheckdata
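# Illustrative sketch (an assumption, not part of the original module): how a
# driver script would typically wire ErrorChecker to a cppcheck dump and the
# matching source file. The file names below are hypothetical.
def example_run_error_checker(dump_file='laser_node.cpp.dump',
                              source_file='laser_node.cpp'):
    checker = ErrorChecker(dump_file, source_file)
    # check_unit_errors(...) must then be called with the parsed cppcheck
    # configuration unit and the sorted analysis-unit dict to populate errors.
    return checker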
class ErrorChecker:
''' IMPLEMENTATION OF MAIN ERROR CHECKING
'''
def __init__(self, dump_file, source_file):
self.dump_file = dump_file
self.current_file_under_analysis = ''
self.source_file = source_file
self.source_file_exists = False
self.source_file_lines = []
self.prepare_source_file_for_reading()
self.all_errors = []
#self.all_warnings = []
self.symbol_helper = SymbolHelper()
self.have_found_addition_error_on_this_line = False
self.marked_as_low_confidence = []
self.variable_units_to_check = {}
self.variable_units_to_check_as_list = []
def prepare_source_file_for_reading(self):
# OPEN SOURCE FILE IF ERRORS FOUND
self.source_file_exists = os.path.isfile(self.source_file)
self.source_file_lines = []
if self.source_file_exists:
with open(self.source_file, 'r') as f:
self.source_file_lines = f.readlines()
else:
print 'No source file found at: %s' % self.source_file
def get_file_URI_where_error_occured(self, e):
''' GETS THE BEST URI POSSIBLE OF THE FILE CONTAINING THE ERROR
input: None
returns: string representing URI of file with error
'''
if not e.token or not self.current_file_under_analysis:
return ''
# REMOVE FILENAME FROM END
base_path = re.sub('(\w|\.)*$', '', self.current_file_under_analysis)
# APPEND FILE NAME - MIGHT INCLUDE SOMETHING LIKE "../include/laser_transform_core.h"
return base_path + e.token.file
def check_unit_errors(self, cppcheck_configuration_unit, sorted_analysis_unit_dict):
# COLLECT ERRORS
self.error_check_function_args_consistent(cppcheck_configuration_unit)
self.error_check_addition_of_incompatible_units(sorted_analysis_unit_dict)
self.error_check_comparisons(sorted_analysis_unit_dict)
#self.error_check_logical_operators(sorted_analysis_unit_dict)
self.error_check_multiple_units()
# CHECK ERRORS WITH TOP3 UNITS
self.check_errors_with_low_confidence_when_top3_units(cppcheck_configuration_unit, sorted_analysis_unit_dict)
# DISPLAY ERRORS
self.pretty_print()
def error_check_multiple_units(self):
''' MULTIPLE_UNIT_TYPE ASSIGNMENT ERROR CHECKING IMPLEMENTATION
returns: none
side_effects: might add UnitError objects to self.all_errors list
'''
for root_token, token, name, units, isKnownRhs in con.multi_unit_variables:
new_error = UnitError()
new_error.ERROR_TYPE = UnitErrorTypes.VARIABLE_MULTIPLE_UNITS
new_error.linenr = token.linenr
new_error.token = root_token
new_error.token_left = token
new_error.var_name = name
new_error.units_when_multiple_happened = units
new_error.dont_check_for_warning = isKnownRhs
new_error.is_unit_propagation_based_on_constants = \
root_token.is_unit_propagation_based_on_constants
new_error.is_unit_propagation_based_on_unknown_variable = \
root_token.is_unit_propagation_based_on_unknown_variable
if new_error.is_unit_propagation_based_on_constants or \
new_error.is_unit_propagation_based_on_unknown_variable:
new_error.is_warning = True
if (not new_error.dont_check_for_warning) and (new_error.linenr in self.marked_as_low_confidence):
new_error.is_warning = True
self.all_errors.append(new_error)
def error_check_function_args_consistent(self, cppcheck_configuration_unit):
''' VERIFIES UNIT CONSISTENCY OF FUNCTIONS AT EVERY CALL POINT
input: cppcheck configuration unit from dump
returns: none
side_effects: might add UnitError objects to self.all_errors list
'''
# FOR EACH FUNCTION
for f in cppcheck_configuration_unit.functions:
# FOR EACH ARG IN A FUNCTION
for arg_list_of_call_points in f.arg_units:
# FOR EACH TIME THE FUNCTION WAS CALLED
error_found = False
new_error = UnitError()
first_call_point_with_units = None
for call_point in arg_list_of_call_points:
# FIND SOME CALL POINT WITH UNITS
if not first_call_point_with_units:
if call_point['units']:
first_call_point_with_units = call_point
continue
# CHECK UNITS OF FIRST CALL POINT AGAINST ALL OTHERS
if call_point['units'] and (call_point['units'] != first_call_point_with_units['units']):
error_found = True
# FOUND DIFFERENT UNITS AT TWO DIFFERENT CALL POINTS
new_error.var_name = f.name
new_error.ERROR_TYPE = UnitErrorTypes.FUNCTION_CALLED_WITH_DIFFERENT_UNIT_ARGUMENTS
new_error.token = first_call_point_with_units['function']
# FIRST ASSIGNMENT
new_error.set_primary_line_number(first_call_point_with_units['linenr'])
new_error.linenr_at_first_unit_assignment = first_call_point_with_units['linenr']
new_error.units_at_first_assignment = first_call_point_with_units['units']
new_error.token_left = first_call_point_with_units['token']
# SECOND (DIFFERENT) ASSIGNMENT
new_error.linenr_of_multiple_unit_assignment = call_point['linenr']
new_error.units_when_multiple_happened = call_point['units']
new_error.token_right = call_point['token']
break
if error_found:
# IF NO RETURN UNITS, REPORT AS A WARNING
if (f.return_units == []) or f.maybe_generic_function:
new_error.is_warning = True
# GET LINE FROM ORIGINAL FILE IF IT EXISTS
if self.source_file_exists:
# TODO: resolve relative link to different file and load source
if new_error.linenr_at_first_unit_assignment <= len(self.source_file_lines) and \
new_error.linenr_of_multiple_unit_assignment <= len(self.source_file_lines):
new_error.source_code_at_first_assignment = \
self.source_file_lines[new_error.linenr_at_first_unit_assignment - 1].strip()
new_error.source_code_when_multiple_units_happened = \
self.source_file_lines[new_error.linenr_of_multiple_unit_assignment - 1].strip()
# COLLECT ERROR
self.all_errors.append(new_error)
def error_check_addition_of_incompatible_units(self, sorted_analysis_unit_dict):
''' ERROR CHECK ADDITION OF INCOMPATIBLE UNITS
input: sorted analysis unit dictionary of functions
returns: none
side_effects: might add UnitError objects to self.all_errors list
'''
for function_dict in sorted_analysis_unit_dict.values():
tw = TreeWalker(None)
for root_token in function_dict['root_tokens']:
self.have_found_addition_error_on_this_line = False
tw.generic_recurse_and_apply_function(root_token, self.error_check_addition_of_incompatible_units_recursive)
def error_check_addition_of_incompatible_units_recursive(self, token, left_token, right_token):
''' ERROR CHECK ADDITION OF INCOMPATIBLE UNITS - RESURSIVE TARGET
input: token (cppcheck token object),
left_token, right_token
returns: nothing, with possible side effect of adding errors
'''
if token.str in ['+', '-', '+=', '-=']:
# THIS IS ADDITION OR SUBTRACTION
if token.astOperand1 and token.astOperand2:
if token.astOperand1.units and token.astOperand2.units:
# BOTH CHILDREN HAVE UNITS
if token.astOperand1.units != token.astOperand2.units:
if not self.have_found_addition_error_on_this_line:
# UNIT MISMATCH ON ADDITION
new_error = UnitError()
new_error.ERROR_TYPE = UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS
new_error.is_unit_propagation_based_on_constants = token.is_unit_propagation_based_on_constants
new_error.is_unit_propagation_based_on_unknown_variable = \
token.is_unit_propagation_based_on_unknown_variable
if token.is_unit_propagation_based_on_constants or \
token.is_unit_propagation_based_on_unknown_variable:
new_error.is_warning = True
if (not new_error.is_warning):
new_error.is_warning = self.check_if_error_with_low_confidence(token, left_token, right_token)
if new_error.is_warning:
self.marked_as_low_confidence.append(token.linenr)
# LINENR
new_error.linenr = token.linenr
new_error.token_left = left_token
new_error.token_right = right_token
new_error.token = token
new_error.var_name = self.get_var_name(left_token)
# GET LINE FROM ORIGINAL FILE IF IT EXISTS
if self.source_file_exists:
pass
# COLLECT ERROR
self.all_errors.append(new_error)
self.have_found_addition_error_on_this_line = True
def error_check_comparisons(self, sorted_analysis_unit_dict):
''' ERR CHECK COMPARISION OF UNITS OVER LOGICAL OPERATORS
input: sorted analysis unit dictionary of functions
returns: none
side_effects: might add UnitError objects to self.all_errors list
'''
for function_dict in sorted_analysis_unit_dict.values():
tw = TreeWalker(None)
for root_token in function_dict['root_tokens']:
tw.generic_recurse_and_apply_function(root_token, self.error_check_comparison_recursive)
def error_check_comparison_recursive(self, token, left_token, right_token):
''' COMPARISON OPERATORS - MUST BE THE SAME ON BOTH SIDES
input: token (cppcheck token object),
left_token, right_token
returns: nothing, with possible side effect of adding errors
'''
if token.isComparisonOp:
if left_token and left_token.units and right_token and right_token.units:
# BOTH HAVE UNITS
if left_token.units != right_token.units:
# UNIT MISMATCH ON COMPARISON
new_error = UnitError()
new_error.ERROR_TYPE = UnitErrorTypes.COMPARISON_INCOMPATIBLE_UNITS
# LINENR
new_error.linenr = token.linenr
new_error.token_left = left_token
new_error.token_right = right_token
new_error.token = token
new_error.var_name = self.get_var_name(left_token)
new_error.is_unit_propagation_based_on_constants = \
left_token.is_unit_propagation_based_on_constants or \
right_token.is_unit_propagation_based_on_constants
new_error.is_unit_propagation_based_on_unknown_variable = \
left_token.is_unit_propagation_based_on_unknown_variable or \
right_token.is_unit_propagation_based_on_unknown_variable
if new_error.is_unit_propagation_based_on_constants or \
new_error.is_unit_propagation_based_on_unknown_variable:
new_error.is_warning = True
if (not new_error.is_warning):
new_error.is_warning = self.check_if_error_with_low_confidence(token, left_token, right_token)
# GET LINE FROM ORIGINAL FILE IF IT EXISTS
if self.source_file_exists:
pass
# COLLECT ERROR
self.all_errors.append(new_error)
def error_check_logical_operators(self, sorted_analysis_unit_dict):
''' ERR CHECK UNITS DURING LOGICAL OPERATIONS
input: sorted analysis unit dictionary of functions
returns: none
side_effects: might add UnitError objects to self.all_errors list
'''
for function_dict in sorted_analysis_unit_dict.values():
tw = TreeWalker(None)
for root_token in function_dict['root_tokens']:
tw.generic_recurse_and_apply_function(root_token, self.error_check_logical_recursive)
def error_check_logical_recursive(self, token, left_token, right_token):
if token.isOp and token.str in ['&&', '||', '!']:
if (left_token and left_token.units) or (right_token and right_token.units):
# UNIT ERROR ON LOGICAL OPERATION
new_error = UnitError()
new_error.ERROR_TYPE = UnitErrorTypes.LOGICAL_OPERATOR_USED_ON_UNITS
# LINENR
new_error.linenr = token.linenr
new_error.token_left = left_token
new_error.token_right = right_token
new_error.token = token
new_error.var_name = self.get_var_name(left_token)
new_error.is_unit_propagation_based_on_constants = token.is_unit_propagation_based_on_constants
new_error.is_unit_propagation_based_on_unknown_variable = \
token.is_unit_propagation_based_on_unknown_variable
if token.is_unit_propagation_based_on_constants or \
token.is_unit_propagation_based_on_unknown_variable:
new_error.is_warning = True
# GET LINE FROM ORIGINAL FILE IF IT EXISTS
if self.source_file_exists:
pass
# COLLECT ERROR
self.all_errors.append(new_error)
def check_if_error_with_low_confidence(self, token, left_token, right_token):
#con.FOUND_DERIVED_CU_VARIABLE = False
#units = self.get_left_right_units(token, left_token, right_token)
#if con.FOUND_DERIVED_CU_VARIABLE:
# if len(units) > 2:
# return True
#elif units:
# return True
return False
#return (True if units else False)
def get_left_right_units(self, token, left_token, right_token):
units = []
left_units = []
right_units = []
if left_token:
if left_token.str in ['*', '/'] and left_token.astOperand1 and left_token.astOperand2:
if left_token.astOperand1.units == [{'nounit': 0.0}]:
left_token = left_token.astOperand2
elif left_token.astOperand2.units == [{'nounit': 0.0}]:
left_token = left_token.astOperand1
if left_token.str in ['+', '-', '*', '/']:
left_units = self.get_left_right_units(left_token, left_token.astOperand1, left_token.astOperand2)
else:
left_units = left_token.units
left_name = left_token.str
if left_token.str == '.' or left_token.str == '[':
(left_token, left_name) = self.symbol_helper.find_compound_variable_and_name_for_dot_operand(left_token)
if (left_token.variable, left_name) in con.variable2unitproba:
n = 3
if con.is_only_known_unit_variable(left_token.variable, left_name):
n = 1
left_units = con.variable2unitproba[(left_token.variable, left_name)][:n]
left_units = filter(lambda (u, p): p > con.unit_prob_threshold, left_units)
left_units = map(lambda (u, p): u, left_units)
if con.ENABLE_UNIT_LIST_FLATTENING:
left_units = con.flatten_unit_list(left_units)
if right_token:
if right_token.str in ['*', '/'] and right_token.astOperand1 and right_token.astOperand2:
if right_token.astOperand1.units == [{'nounit': 0.0}]:
right_token = right_token.astOperand2
elif right_token.astOperand2.units == [{'nounit': 0.0}]:
right_token = right_token.astOperand1
if right_token.str in ['+', '-', '*', '/']:
right_units = self.get_left_right_units(right_token, right_token.astOperand1, right_token.astOperand2)
else:
right_units = right_token.units
right_name = right_token.str
if right_token.str == '.' or right_token.str == '[':
(right_token, right_name) = self.symbol_helper.find_compound_variable_and_name_for_dot_operand(right_token)
if (right_token.variable, right_name) in con.variable2unitproba:
n = 3
if con.is_only_known_unit_variable(right_token.variable, right_name):
n = 1
right_units = con.variable2unitproba[(right_token.variable, right_name)][:n]
right_units = filter(lambda (u, p): p > con.unit_prob_threshold, right_units)
right_units = map(lambda (u, p): u, right_units)
if con.ENABLE_UNIT_LIST_FLATTENING:
right_units = con.flatten_unit_list(right_units)
if not left_units:
return right_units
elif not right_units:
return left_units
else:
if token.str in ['*', '/']:
tw = TreeWalker(None)
all_unit_dicts_from_multiplication = []
for unit_dict_left in left_units:
for unit_dict_right in right_units:
result_units = tw.apply_multiplication_to_unit_dicts(
unit_dict_left,
unit_dict_right,
token.str)
if result_units:
all_unit_dicts_from_multiplication.append(result_units)
for u in all_unit_dicts_from_multiplication:
if u not in units:
units.append(u)
else:
for lu in left_units:
if lu in right_units:
units.append(lu)
return units
def check_errors_with_low_confidence_when_top3_units(self, | |
ypos = []
xneg = []
yneg = []
for x, (_, y) in zip(xdates, data):
y = 0.5 - y.Value
if y > 0:
xpos.append(x)
ypos.append(y)
else:
xneg.append(x)
yneg.append(y)
pyplot.bar(xpos, ypos, color="g", label="Positive")
pyplot.bar(xneg, yneg, color="r", label="Negative")
legend = pyplot.legend(loc=1, fontsize=args.font_size)
pyplot.ylabel("Lines of code")
pyplot.xlabel("Time")
apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.background,
args.font_size, args.size)
pyplot.xlim(parse_date(args.start_date, xdates[0]), parse_date(args.end_date, xdates[-1]))
locator = pyplot.gca().xaxis.get_major_locator()
# set the optimal xticks locator
if "M" not in resample:
pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
locs = pyplot.gca().get_xticks().tolist()
if len(locs) >= 16:
pyplot.gca().xaxis.set_major_locator(matplotlib.dates.YearLocator())
locs = pyplot.gca().get_xticks().tolist()
if len(locs) >= 16:
pyplot.gca().xaxis.set_major_locator(locator)
if locs[0] < pyplot.xlim()[0]:
del locs[0]
endindex = -1
if len(locs) >= 2 and pyplot.xlim()[1] - locs[-1] > (locs[-1] - locs[-2]) / 2:
locs.append(pyplot.xlim()[1])
endindex = len(locs) - 1
startindex = -1
if len(locs) >= 2 and locs[0] - pyplot.xlim()[0] > (locs[1] - locs[0]) / 2:
locs.append(pyplot.xlim()[0])
startindex = len(locs) - 1
pyplot.gca().set_xticks(locs)
    # overwrite the outermost tick labels with the exact start/end dates
labels = pyplot.gca().get_xticklabels()
if startindex >= 0:
labels[startindex].set_text(xdates[0].date())
labels[startindex].set_text = lambda _: None
labels[startindex].set_rotation(30)
labels[startindex].set_ha("right")
if endindex >= 0:
labels[endindex].set_text(xdates[-1].date())
labels[endindex].set_text = lambda _: None
labels[endindex].set_rotation(30)
labels[endindex].set_ha("right")
overall_pos = sum(2 * (0.5 - d[1].Value) for d in data if d[1].Value < 0.5)
overall_neg = sum(2 * (d[1].Value - 0.5) for d in data if d[1].Value > 0.5)
title = "%s sentiment +%.1f -%.1f δ=%.1f" % (
name, overall_pos, overall_neg, overall_pos - overall_neg)
deploy_plot(title, args.output, args.background)
def show_devs(args, name, start_date, end_date, people, days):
from scipy.signal import convolve, slepian
max_people = 50
if len(people) > max_people:
        print("Picking top %d developers by commit count" % max_people)
# pick top N developers by commit count
commits = defaultdict(int)
for devs in days.values():
for dev, stats in devs.items():
commits[dev] += stats.Commits
commits = sorted(((v, k) for k, v in commits.items()), reverse=True)
chosen_people = {people[k] for _, k in commits[:max_people]}
else:
chosen_people = set(people)
dists, devseries, devstats, route = order_commits(chosen_people, days, people)
route_map = {v: i for i, v in enumerate(route)}
# determine clusters
clusters = hdbscan_cluster_routed_series(dists, route)
keys = list(devseries.keys())
route = [keys[node] for node in route]
print("Plotting")
# smooth time series
start_date = datetime.fromtimestamp(start_date)
start_date = datetime(start_date.year, start_date.month, start_date.day)
end_date = datetime.fromtimestamp(end_date)
end_date = datetime(end_date.year, end_date.month, end_date.day)
size = (end_date - start_date).days + 1
plot_x = [start_date + timedelta(days=i) for i in range(size)]
resolution = 64
window = slepian(size // resolution, 0.5)
final = numpy.zeros((len(devseries), size), dtype=numpy.float32)
for i, s in enumerate(devseries.values()):
arr = numpy.array(s).transpose()
full_history = numpy.zeros(size, dtype=numpy.float32)
mask = arr[0] < size
full_history[arr[0][mask]] = arr[1][mask]
final[route_map[i]] = convolve(full_history, window, "same")
matplotlib, pyplot = import_pyplot(args.backend, args.style)
pyplot.rcParams["figure.figsize"] = (32, 16)
prop_cycle = pyplot.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
fig, axes = pyplot.subplots(final.shape[0], 1)
backgrounds = ("#C4FFDB", "#FFD0CD") if args.background == "white" else ("#05401C", "#40110E")
max_cluster = numpy.max(clusters)
for ax, series, cluster, dev_i in zip(axes, final, clusters, route):
if cluster >= 0:
color = colors[cluster % len(colors)]
i = 1
while color == "#777777":
color = colors[(max_cluster + i) % len(colors)]
i += 1
else:
# outlier
color = "#777777"
ax.fill_between(plot_x, series, color=color)
ax.set_axis_off()
author = people[dev_i]
ax.text(0.03, 0.5, author[:36] + (author[36:] and "..."),
horizontalalignment="right", verticalalignment="center",
transform=ax.transAxes, fontsize=14,
color="black" if args.background == "white" else "white")
ds = devstats[dev_i]
stats = "%5d %8s %8s" % (ds[0], _format_number(ds[1] - ds[2]), _format_number(ds[3]))
ax.text(0.97, 0.5, stats,
horizontalalignment="left", verticalalignment="center",
transform=ax.transAxes, fontsize=14, family="monospace",
backgroundcolor=backgrounds[ds[1] <= ds[2]],
color="black" if args.background == "white" else "white")
axes[0].text(0.97, 1.75, " cmts delta changed",
horizontalalignment="left", verticalalignment="center",
transform=axes[0].transAxes, fontsize=14, family="monospace",
color="black" if args.background == "white" else "white")
axes[-1].set_axis_on()
target_num_labels = 12
num_months = (end_date.year - start_date.year) * 12 + end_date.month - start_date.month
interval = int(numpy.ceil(num_months / target_num_labels))
if interval >= 8:
interval = int(numpy.ceil(num_months / (12 * target_num_labels)))
axes[-1].xaxis.set_major_locator(matplotlib.dates.YearLocator(base=max(1, interval // 12)))
axes[-1].xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%Y"))
else:
axes[-1].xaxis.set_major_locator(matplotlib.dates.MonthLocator(interval=interval))
axes[-1].xaxis.set_major_formatter(matplotlib.dates.DateFormatter("%Y-%m"))
for tick in axes[-1].xaxis.get_major_ticks():
tick.label.set_fontsize(args.font_size)
axes[-1].spines["left"].set_visible(False)
axes[-1].spines["right"].set_visible(False)
axes[-1].spines["top"].set_visible(False)
axes[-1].get_yaxis().set_visible(False)
axes[-1].set_facecolor((1.0,) * 3 + (0.0,))
title = ("%s commits" % name) if not args.output else ""
deploy_plot(title, args.output, args.background)
def order_commits(chosen_people, days, people):
try:
from fastdtw import fastdtw
except ImportError as e:
print("Cannot import fastdtw: %s\nInstall it from https://github.com/slaypni/fastdtw" % e)
sys.exit(1)
devseries = defaultdict(list)
devstats = defaultdict(lambda: DevDay(0, 0, 0, 0, {}))
for day, devs in sorted(days.items()):
for dev, stats in devs.items():
if people[dev] in chosen_people:
devseries[dev].append((day, stats.Commits))
devstats[dev] = devstats[dev].add(stats)
print("Calculating the distance matrix")
# max-normalize the time series using a sliding window
series = list(devseries.values())
for i, s in enumerate(series):
arr = numpy.array(s).transpose().astype(numpy.float32)
commits = arr[1]
if len(commits) < 7:
commits /= commits.max()
else:
# 4 is sizeof(float32)
windows = numpy.lib.stride_tricks.as_strided(commits, [len(commits) - 6, 7], [4, 4])
commits = numpy.concatenate((
[windows[0, 0] / windows[0].max(),
windows[0, 1] / windows[0].max(),
windows[0, 2] / windows[0].max()],
windows[:, 3] / windows.max(axis=1),
[windows[-1, 4] / windows[-1].max(),
windows[-1, 5] / windows[-1].max(),
windows[-1, 6] / windows[-1].max()]
))
arr[1] = commits * 7 # 7 is a pure heuristic here and is not related to window size
series[i] = list(arr.transpose())
# calculate the distance matrix using dynamic time warping metric
dists = numpy.full((len(series) + 1, len(series) + 1), -100500, dtype=numpy.float32)
for x in range(len(series)):
dists[x, x] = 0
for y in range(x + 1, len(series)):
# L1 norm
dist, _ = fastdtw(series[x], series[y], radius=5, dist=1)
dists[x, y] = dists[y, x] = dist
# preparation for seriation ordering
dists[len(series), :] = 0
dists[:, len(series)] = 0
assert (dists >= 0).all()
print("Ordering the series")
route = seriate(dists)
return dists, devseries, devstats, route
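# Example (illustrative sketch, not from the original module): order_commits()
# max-normalizes every commit series with a 7-day sliding window before the
# DTW distances are computed. The as_strided() call builds overlapping windows
# of length 7 stepping one element at a time (strides of 4 bytes because the
# array is float32). For a hypothetical series of 10 daily commit counts:
#
#     commits = numpy.arange(10, dtype=numpy.float32)
#     windows = numpy.lib.stride_tricks.as_strided(commits, [4, 7], [4, 4])
#     # windows[0] -> [0, 1, 2, 3, 4, 5, 6], windows[1] -> [1, ..., 7], ...
#
# On NumPy >= 1.20 the same view can be produced with
# numpy.lib.stride_tricks.sliding_window_view(commits, 7), which avoids
# hard-coding the item size.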
def hdbscan_cluster_routed_series(dists, route):
try:
from hdbscan import HDBSCAN
except ImportError as e:
print("Cannot import ortools: %s\nInstall it from "
"https://developers.google.com/optimization/install/python/" % e)
sys.exit(1)
opt_dist_chain = numpy.cumsum(numpy.array(
[0] + [dists[route[i], route[i + 1]] for i in range(len(route) - 1)]))
clusters = HDBSCAN(min_cluster_size=2).fit_predict(opt_dist_chain[:, numpy.newaxis])
return clusters
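# Example (illustrative sketch, not from the original module): the clustering
# above runs HDBSCAN on the cumulative DTW distance along the seriated route,
# so developers whose series sit close together on the chain get nearby 1-D
# positions and tend to share a cluster. With hypothetical consecutive gaps of
# [1, 1, 10, 1] the positions are [0, 1, 2, 12, 13]; HDBSCAN(min_cluster_size=2)
# would typically split them into {0, 1, 2} and {12, 13} (exact labels depend
# on the hdbscan version).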
def seriate(dists):
try:
from ortools.constraint_solver import pywrapcp, routing_enums_pb2
except ImportError as e:
print("Cannot import ortools: %s\nInstall it from "
"https://developers.google.com/optimization/install/python/" % e)
sys.exit(1)
# solve the TSP on the distance matrix
routing = pywrapcp.RoutingModel(dists.shape[0], 1, dists.shape[0] - 1)
def dist_callback(x, y):
# ortools wants integers, so we approximate here
return int(dists[x][y] * 1000)
routing.SetArcCostEvaluatorOfAllVehicles(dist_callback)
search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters()
search_parameters.local_search_metaheuristic = (
routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
search_parameters.time_limit_ms = 2000
assignment = routing.SolveWithParameters(search_parameters)
index = routing.Start(0)
route = []
while not routing.IsEnd(index):
node = routing.IndexToNode(index)
if node < dists.shape[0] - 1:
route.append(node)
index = assignment.Value(routing.NextVar(index))
return route
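# Example (illustrative sketch, not from the original module): seriate() takes
# a symmetric distance matrix whose last row and column are the zero-cost
# "dummy" node added in order_commits(); the dummy turns the closed TSP tour
# into an open path, so the returned route orders only the real series. Note
# that the positional RoutingModel(...) constructor above is the pre-7.0
# ortools API. Assuming a compatible ortools build, a toy 3-series matrix
# could be ordered like this:
#
#     dists = numpy.zeros((4, 4), dtype=numpy.float32)
#     dists[0, 1] = dists[1, 0] = 1.0
#     dists[1, 2] = dists[2, 1] = 1.5
#     dists[0, 2] = dists[2, 0] = 5.0
#     route = seriate(dists)  # e.g. [0, 1, 2] or its reverse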
def show_devs_efforts(args, name, start_date, end_date, people, days, max_people):
from scipy.signal import convolve, slepian
start_date = datetime.fromtimestamp(start_date)
start_date = datetime(start_date.year, start_date.month, start_date.day)
end_date = datetime.fromtimestamp(end_date)
end_date = datetime(end_date.year, end_date.month, end_date.day)
efforts_by_dev = defaultdict(int)
for day, devs in days.items():
for dev, stats in devs.items():
efforts_by_dev[dev] += stats.Added + stats.Removed + stats.Changed
if len(efforts_by_dev) > max_people:
chosen = {v for k, v in sorted(
((v, k) for k, v in efforts_by_dev.items()), reverse=True)[:max_people]}
print("Warning: truncated people to the most active %d" % max_people)
else:
chosen = set(efforts_by_dev)
chosen_efforts = sorted(((efforts_by_dev[k], k) for k in chosen), reverse=True)
chosen_order = {k: i for i, (_, k) in enumerate(chosen_efforts)}
efforts = numpy.zeros((len(chosen) + 1, (end_date - start_date).days + 1), dtype=numpy.float32)
for day, devs in days.items():
if day < efforts.shape[1]:
for dev, stats in devs.items():
dev = chosen_order.get(dev, len(chosen_order))
efforts[dev][day] += stats.Added + stats.Removed + stats.Changed
efforts_cum = numpy.cumsum(efforts, axis=1)
window = slepian(10, 0.5)
window /= window.sum()
for e in (efforts, efforts_cum):
for i in range(e.shape[0]):
ending = e[i][-len(window) * 2:].copy()
e[i] = convolve(e[i], window, "same")
e[i][-len(ending):] = ending
matplotlib, pyplot = import_pyplot(args.backend, args.style)
plot_x = [start_date + timedelta(days=i) for i in range(efforts.shape[1])]
people = [people[k] for _, k in chosen_efforts] + ["others"]
for i, name in enumerate(people):
if len(name) > 40:
people[i] = name[:37] + "..."
polys = pyplot.stackplot(plot_x, efforts_cum, labels=people)
if len(polys) == max_people + 1:
polys[-1].set_hatch("/")
polys = pyplot.stackplot(plot_x, -efforts * efforts_cum.max() / efforts.max())
if len(polys) == max_people + 1:
polys[-1].set_hatch("/")
yticks = []
for tick in pyplot.gca().yaxis.iter_ticks():
if tick[1] >= 0:
yticks.append(tick[1])
pyplot.gca().yaxis.set_ticks(yticks)
legend = pyplot.legend(loc=2, ncol=2, fontsize=args.font_size)
apply_plot_style(pyplot.gcf(), pyplot.gca(), legend, args.background,
args.font_size, args.size or "16,10")
deploy_plot("Efforts through time (changed lines of code)", args.output, args.background)
def show_old_vs_new(args, name, start_date, end_date, people, days):
from scipy.signal import convolve, slepian
start_date = datetime.fromtimestamp(start_date)
start_date = datetime(start_date.year, start_date.month, start_date.day)
end_date = datetime.fromtimestamp(end_date)
end_date = datetime(end_date.year, end_date.month, end_date.day)
import numpy as np
import math
from constants import *
from utils import deg3_real_roots, intersect, get_sorting
_apply_vectorized = np.vectorize(lambda f, x: f(x), otypes=[np.dtype('float')])
class PiecewiseFunction:
def __init__(self, xs, fns):
self._xs = np.array(xs)
self._fns = np.array(fns)
assert(len(self._xs) > 0)
assert(len(self._fns) > 0)
if len(self._xs) == len(self._fns)+1:
self._fns = np.concatenate((self._fns, np.array([LinearFunction(0, self._fns[-1](self._xs[-1]))])))
assert(len(self._xs) == len(self._fns))
assert(np.all([callable(f) for f in self._fns]))
def __get_interval_index(self, values, v):
idxs = np.full_like(v, 0, dtype='int32')
for idx in range(len(values)):
idxs[v >= values[idx]] = idx
return idxs
def __call__(self, x):
is_scalar = isinstance(x, float) or isinstance(x, int) or isinstance(x, np.int64) or isinstance(x, np.int32)
if is_scalar:
x = np.array([x])
xidxs = self.__get_interval_index(self._xs, x)
y = _apply_vectorized(self._fns[xidxs], x)
if is_scalar:
return y[0]
else:
return y
def __repr__(self):
return "PiecewiseFunction(" + str(list(self._xs)) + "," + str(list(self._fns)) + ")"
def diff(self):
fns_diff = [f.diff() for f in self._fns]
return PiecewiseFunction(self._xs, fns_diff)
def domains(self):
return zip(self._xs, self._fns)
def intervals(self):
intervals = [(x_min, x_max, fn) for x_min, x_max, fn in zip(self._xs[:-1], self._xs[1:], self._fns)]
intervals.append((self._xs[-1], float('inf'), self._fns[-1]))
return intervals
class LinearFunction:
def __init__(self, d, c, b=0):
self._a = d
self._b = c - d*b
def __call__(self, x):
is_scalar = isinstance(x, float) or isinstance(x, int) or isinstance(x, np.int64) or isinstance(x, np.int32)
if is_scalar:
x = np.array([x], dtype=float)
ys = None
if self._a == 0.0:
ys = np.full_like(x, self._b)
else:
ys = self._a*x + self._b
if is_scalar:
return ys[0]
else:
return ys
def __repr__(self):
return "LinearFunction({}, {})".format(self._a, self._b)
def inverse(self, domain=[float('-inf'), float('inf')]):
if self._a > 0 or self._a < 0:
return LinearFunction(1. / self._a, - self._b / self._a)
else:
return LinearFunction(0, domain[0])
def diff(self):
return LinearFunction(d=0, c=self._a)
def params(self):
return (self._a, self._b)
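# Example (illustrative sketch, not from the original module): a
# PiecewiseFunction pairs breakpoints xs with the functions active from each
# breakpoint onwards, so the i-th function is evaluated for xs[i] <= x < xs[i+1]
# (and the last one for all larger x). A minimal piecewise-linear instance:
#
#     f = PiecewiseFunction([0.0, 1.0],
#                           [LinearFunction(2.0, 0.0),    # 2*x on [0, 1)
#                            LinearFunction(0.0, 2.0)])   # constant 2 afterwards
#     f(0.5)                    # -> 1.0
#     f(3.0)                    # -> 2.0
#     f(np.array([0.5, 3.0]))   # -> array([1., 2.])  (vectorized evaluation)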
class InvHypFunction:
def __init__(self, a, b, c, domain=[float('-inf'), float('inf')]):
self._a = a
self._b = b
self._c = c
self._domain = domain
def __repr__(self):
return "InvHypFunction({}, {}, {}, domain={})".format(self._a, self._b, self._c, self._domain)
# assumes x >= b
def __call__(self, y):
is_scalar = isinstance(y, float) or isinstance(y, int) or isinstance(y, np.int64) or isinstance(y, np.int32)
if is_scalar:
ys = np.array([y])
else:
ys = np.array(y)
if np.any(ys < self._c):
raise Exception("Trying to invert an invalid value {}".format(ys[ys < self._c]))
xs = np.full_like(ys, self._domain[1])
xs[ys > self._c] = self._b + np.sqrt(self._a / (ys[ys > self._c] - self._c))
if is_scalar:
return xs[0]
else:
return xs
def inverse(self):
return HypLinFunction(self._a, self._b, self._c, 0)
class HypLinFunction:
def __init__(self, a, b, c, d):
assert(a > 0)
assert(np.isfinite(a))
assert(np.isfinite(b))
assert(np.isfinite(c))
assert(np.isfinite(d))
self._a = a
self._b = b
self._c = c
self._d = d
def __call__(self, x):
if math.fabs(self._a) < eps:
if math.fabs(self._d) < eps:
return self._c
else:
return self._c + self._d*(x - self._b)
else:
return self._a / (x - self._b)**2 + self._c + self._d*(x - self._b)
def __repr__(self):
return "HypLinFunction({}, {}, {}, {})".format(self._a, self._b, self._c, self._d)
def inverse(self, domain=[float('-inf'), float('inf')]):
if self._d < 0 or self._d > 0:
if self._a > 0. or self._a < 0:
raise Exception("HypLinFunction should not occur")
else:
return LinearFunction(self._d, self._c, self._b).inverse()  # the linear part is c + d*(x - b)
else:
return InvHypFunction(self._a, self._b, self._c, domain=domain)
def diff(self):
return lambda x, a=self._a, b=self._b, d=self._d: -2 * a / (x - b)**3 + d
def params(self):
return (self._a, self._b, self._c, self._d)
class ConstantFunction(PiecewiseFunction):
def __init__(self, t_min, c):
f = LinearFunction(0, c)
super().__init__([0, t_min], [LinearFunction(0, float('inf')), f])
class TradeoffFunction(PiecewiseFunction):
def __init__(self, t_min, t_max, a, b, c):
assert(math.fabs(a) > 0)
hyp = HypLinFunction(a, b, c, 0)
super().__init__([0, t_min, t_max], [LinearFunction(0, float('inf')), hyp, LinearFunction(0, hyp(t_max))])
class ChargingFunction(PiecewiseFunction):
def __init__(self, ts, ys, M):
assert(ys[0] == 0)
assert(ys[-1] == M)
fns = np.concatenate((make_piecewise_linear(ts, ys), [LinearFunction(0., ys[-1])]))
super().__init__(ts, fns)
def inverse(self):
return invert_piecewise_linear(self)
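# Example (illustrative sketch, not from the original module): a
# ChargingFunction is a monotone piecewise-linear map from charging time to
# state of charge, extended with a constant segment once the maximum M is
# reached, and inverse() flips it into "time needed to reach a given charge".
# With hypothetical breakpoints:
#
#     ts = np.array([0.0, 1.0, 2.0])
#     ys = np.array([0.0, 5.0, 10.0])
#     cf = ChargingFunction(ts, ys, M=10.0)
#     cf(0.5)             # -> 2.5
#     cf(3.0)             # -> 10.0 (already full)
#     cf.inverse()(5.0)   # -> 1.0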
def make_piecewise_linear(xs, ys):
dx = xs[1:]-xs[:-1]
dy = ys[1:]-ys[:-1]
dy[ys[1:] == ys[:-1]] = 0
A = dy/dx
B = ys[:-1] - A*xs[:-1]
index = A >= float('inf')
A[index] = 0
B[index] = ys[:-1][index]
index = A <= -float('inf')
A[index] = 0
B[index] = ys[:-1][index]
fns = []
for a,b in zip(A,B):
fns.append(LinearFunction(a, b))
return fns
def invert_piecewise_linear(f):
ys = [sub_f(x_min) for x_min, sub_f in f.domains()]
asc, dsc = get_sorting(list(ys))
if not asc and not dsc:
raise Exception("Not monotone")
if asc:
inv_domains = []
for i, (x_min, x_max, sub_f) in enumerate(f.intervals()):
y_min = ys[i]
inv_sub_f = sub_f.inverse(domain=[x_min, x_max])
inv_domains.append((y_min, inv_sub_f))
else:
assert(dsc)
inv_domains = []
for i, (x_min, x_max, sub_f) in enumerate(f.intervals()):
y_min = sub_f(x_max)
inv_sub_f = sub_f.inverse(domain=[x_min, x_max])
inv_domains.append((y_min, inv_sub_f))
inv_domains = list(reversed(inv_domains))
xs = [x for x, _ in inv_domains]
fns = [f for _, f in inv_domains]
return PiecewiseFunction(xs, fns)
# positive value shifts the function to the left
# negative value to the right
def shift(f, x_shift):
assert(isinstance(f, PiecewiseFunction))
new_xs = []
new_fns = []
for x_min, sub_f in f.domains():
assert(isinstance(sub_f, LinearFunction))
a, b = sub_f.params()
b = b + a*x_shift
new_xs.append(x_min - x_shift)
new_fns.append(LinearFunction(a, b))
return PiecewiseFunction(new_xs, new_fns)
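# Example (illustrative sketch, not from the original module): shift() moves a
# piecewise-linear function along the x axis so that shift(f, s)(x) == f(x + s),
# i.e. a positive s shifts the graph to the left. Reusing the toy function from
# the earlier example:
#
#     f = PiecewiseFunction([0.0, 1.0], [LinearFunction(2.0, 0.0), LinearFunction(0.0, 2.0)])
#     g = shift(f, 0.5)
#     g(0.0)   # -> 1.0, the same value as f(0.5)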
def clip(f, x_0):
assert(isinstance(f, PiecewiseFunction))
new_xs = []
new_fns = []
for x_min, x_max, sub_f in f.intervals():
new_x_min = max(x_min, x_0)
if new_x_min < x_max:
new_xs.append(new_x_min)
new_fns.append(sub_f)
if len(new_fns) > 0:
return PiecewiseFunction(new_xs, new_fns)
else:
return None
def multiply(f, c):
assert(isinstance(f, PiecewiseFunction))
new_xs = []
new_fns = []
for x_min, x_max, sub_f in f.intervals():
assert(isinstance(sub_f, LinearFunction))
a, b = sub_f.params()
new_xs.append(x_min)
new_fns.append(LinearFunction(c*a, c*b))
return PiecewiseFunction(new_xs, new_fns)
def offset(f, c):
assert(isinstance(f, PiecewiseFunction))
new_xs = []
new_fns = []
for x_min, x_max, sub_f in f.intervals():
assert(isinstance(sub_f, LinearFunction))
a, b = sub_f.params()
new_xs.append(x_min)
new_fns.append(LinearFunction(a, c+b))
return PiecewiseFunction(new_xs, new_fns)
# Intersects two functions f_1 and f_2
# The assumption is that they are either
# hyperbolic or linear
def intersect_functions(f_1, f_2, domain):
x_min, x_max = domain
if len(f_1.params()) == 4:
a_1, b_1, c_1, d_1 = f_1.params()
elif len(f_1.params()) == 2:
d_1, c_1 = f_1.params()
a_1 = 0
b_1 = 0
if len(f_2.params()) == 4:
a_2, b_2, c_2, d_2 = f_2.params()
elif len(f_2.params()) == 2:
d_2, c_2 = f_2.params()
a_2 = 0
b_2 = 0
intersections = []
if a_1 == 0 and a_2 == 0:
if math.fabs(d_1 - d_2) > eps:
x = (c_2 - c_1) / (d_1 - d_2)
if x > x_min and x < x_max:
intersections = [x]
elif a_1 == 0 and a_2 != 0:
if math.fabs(d_1) > eps:
intersections = deg3_real_roots(-d_1, c_2 - c_1 - d_1 * b_2, 0, a_2)
intersections = [z+b_2 for z in intersections if z+b_2 > x_min and z+b_2 < x_max]
elif math.fabs(c_1 - c_2) < eps:
return []
else:
x = b_2 + np.sqrt(a_2 / (c_2 - c_1))
if x > x_min and x < x_max:
intersections = [x]
elif a_1 != 0 and a_2 == 0:
if math.fabs(d_2) > eps:
intersections = deg3_real_roots(-d_2, c_1 - c_2 - d_2 * b_1, 0, a_1)
intersections = [z+b_1 for z in intersections if z+b_1 > x_min and z+b_1 < x_max]
elif math.fabs(c_1 - c_2) < eps:
return []
else:
x = b_1 + np.sqrt(a_1 / (c_1 - c_2))
if x > x_min and x < x_max:
intersections = [x]
else:
# FIXME two hyperbolic functions generally don't intersect
# but we need a better test
return []
return intersections
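# Example (illustrative sketch, not from the original module): for two linear
# pieces (a_1 == a_2 == 0) the intersection reduces to solving
# c_1 + d_1*x == c_2 + d_2*x inside the given domain; mixed hyperbolic/linear
# pairs go through deg3_real_roots(), and two hyperbolic pieces are currently
# assumed not to intersect (see the FIXME inside the function). A hypothetical
# linear check:
#
#     f_1 = LinearFunction(1.0, 0.0)   # y = x
#     f_2 = LinearFunction(0.0, 2.0)   # y = 2
#     intersect_functions(f_1, f_2, (0.0, 10.0))   # -> [2.0]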
# Takes a list of intervals of the form (x_min, x_max, ...) and a key function
# that maps an interval and an x value to its y value.
# Returns a list (x_min, x_max, index) where index refers to the input interval
def lower_envelop(intervals, key):
assert(np.all([float('inf') > key(interval, interval[0]) for interval in intervals]))
assert(np.all([math.fabs(interval[0] - interval[1]) > eps for interval in intervals]))
start_points = [(interval[0], i) for (i, interval) in enumerate(intervals)]
end_points = [(interval[1], i) for (i, interval) in enumerate(intervals)]
points = sorted(start_points + end_points)
active_intervals = []
minimum = []
while len(points) > 0:
current_x = points[0][0]
# process all events
while len(points) > 0 and points[0][0] <= current_x + eps:
x, index = points.pop(0)
if index >= 0:
interval = intervals[index]
if interval[0] + eps >= current_x:
active_intervals.append(index)
else:
assert(interval[1] + eps >= current_x)
active_intervals.remove(index)
if len(active_intervals) > 0:
next_x = points[0][0]
epsilon_key = lambda a: (eps_round(a[0]), eps_round(a[1]))
sorted_intervals = sorted([(key(intervals[index], current_x), key(intervals[index], next_x), index) for index in active_intervals], key=epsilon_key)
current_y = sorted_intervals[0][0]
minimum.append((current_x, sorted_intervals[0][2]))
for (y_1, y_1_next, index_1), (y_2, y_2_next, index_2) in zip(sorted_intervals[:-1], sorted_intervals[1:]):
# FIXME this needs to be replaced by a
8, 1236: 2, 1265: 4, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1322: 8, 1331: 8, 1332: 8, 1333: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
},
{
68: 8, 127: 8, 304: 8, 320: 8, 339: 8, 352: 8, 356: 4, 544: 8, 576: 8, 593: 8, 688: 5, 881: 8, 882: 8, 897: 8, 902: 8, 903: 8, 909: 8, 912: 7, 916: 8, 1040: 8, 1056: 8, 1057: 8, 1078: 4, 1136: 6, 1151: 6, 1168: 7, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1265: 4, 1268: 8, 1280: 1, 1287: 4, 1290: 8, 1291: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1355: 8, 1363: 8, 1369: 8, 1371: 8, 1407: 8, 1419: 8, 1420: 8, 1425: 2, 1427: 6, 1429: 8, 1430: 8, 1448: 8, 1456: 4, 1470: 8, 1476: 8, 1535: 8
}],
CAR.PALISADE: [{
67: 8, 127: 8, 304: 8, 320: 8, 339: 8, 356: 4, 544: 8, 546: 8, 547: 8, 548: 8, 549: 8, 576: 8, 593: 8, 608: 8, 688: 6, 809: 8, 832: 8, 854: 7, 870: 7, 871: 8, 872: 8, 897: 8, 902: 8, 903: 8, 905: 8, 909: 8, 913: 8, 916: 8, 1040: 8, 1042: 8, 1056: 8, 1057: 8, 1064: 8, 1078: 4, 1107: 5, 1123: 8, 1136: 8, 1151: 6, 1155: 8, 1156: 8, 1157: 4, 1162: 8, 1164: 8, 1168: 7, 1170: 8, 1173: 8, 1180: 8, 1186: 2, 1191: 2, 1193: 8, 1210: 8, 1225: 8, 1227: 8, 1265: 4, 1280: 8, 1287: 4, 1290: 8, 1292: 8, 1294: 8, 1312: 8, 1322: 8, 1342: 6, 1345: 8, 1348: 8, 1363: 8, 1369: 8, 1371: 8, 1378: 8, 1384: 8, 1407: 8, 1419: 8, 1427: 6, 1456: 4, 1470: 8, 1988: 8, 1996: 8, 2000: 8, 2004: 8, 2005: 8, 2008: 8, 2012: 8
}],
}
FW_VERSIONS = {
CAR.IONIQ: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEhe SCC H-CUP 1.01 1.01 96400-G2000 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.07 56310/G2301 4AEHC107',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEH MFC AT EUR LHD 1.00 1.00 95740-G2400 180222',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F2051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3H1051\x00\x00\xf1\x006U3H0_C2\x00\x006U3H1051\x00\x00HAE0G16US2\x00\x00\x00\x00',
],
},
CAR.IONIQ_PHEV: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\000AEhe SCC FHCUP 1.00 1.02 99110-G2100 ',
b'\xf1\x00AEhe SCC F-CUP 1.00 1.00 99110-G2200 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\000AE MDPS C 1.00 1.01 56310/G2510 4APHC101',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G2560 4APHC101',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\000AEP MFC AT USA LHD 1.00 1.01 95740-G2600 190819',
b'\xf1\x00AEP MFC AT EUR RHD 1.00 1.01 95740-G2600 190819',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6F6051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3J9051\000\000\xf1\0006U3H1_C2\000\0006U3J9051\000\000PAE0G16NL0\x82zT\xd2',
b'\xf1\x816U3J8051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J8051\x00\x00PAETG16UL0\x00\x00\x00\x00',
],
},
CAR.IONIQ_EV_2020: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEev SCC F-CUP 1.00 1.01 99110-G7000 ',
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 99110-G7200 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7310 4APEC101',
b'\xf1\x00AE MDPS C 1.00 1.01 56310/G7560 4APEC101',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.01 95740-G2600 190819',
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.03 95740-G2500 190516',
b'\xf1\x00AEE MFC AT EUR RHD 1.00 1.01 95740-G2600 190819',
],
},
CAR.IONIQ_EV_LTD: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7000 ',
b'\xf1\x00AEev SCC F-CUP 1.00 1.00 96400-G7100 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.02 56310G7300\x00 4AEEC102',
b'\xf1\x00AE MDPS C 1.00 1.04 56310/G7501 4AEEC104',
b'\xf1\x00AE MDPS C 1.00 1.03 56310/G7300 4AEEC103',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.00 95740-G7200 160418',
b'\xf1\x00AEE MFC AT USA LHD 1.00 1.00 95740-G2400 180222',
b'\xf1\x00AEE MFC AT EUR LHD 1.00 1.00 95740-G2300 170703',
],
},
CAR.IONIQ_HEV_2022: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00AEhe SCC F-CUP 1.00 1.00 99110-G2600 ',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00AE MDPS C 1.00 1.01 56310G2510\x00 4APHC101',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00AEH MFC AT USA LHD 1.00 1.00 95740-G2700 201027',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x816H6G5051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x816U3J9051\x00\x00\xf1\x006U3H1_C2\x00\x006U3J9051\x00\x00HAE0G16NL2\x00\x00\x00\x00',
],
},
CAR.SONATA: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00DN8_ SCC F-CU- 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC F-CUP 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC F-CUP 1.00 1.02 99110-L1000 ',
b'\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 ',
b'\xf1\x00DN8_ SCC FHCUP 1.00 1.01 99110-L1000 ',
b'\xf1\x00DN89110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa ',
b'\xf1\x00DN8 1.00 99110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa ',
b'\xf1\x00DN8 1.00 99110-L0000 \xaa\xaa\xaa\xaa\xaa\xaa\xaa\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x8799110L0000\xf1\x00DN8_ SCC FHCUP 1.00 1.00 99110-L0000 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00DN ESC \a 106 \a\x01 58910-L0100',
b'\xf1\x00DN ESC \x01 102\x19\x04\x13 58910-L1300',
b'\xf1\x00DN ESC \x03 100 \x08\x01 58910-L0300',
b'\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100',
b'\xf1\x00DN ESC \x07 104\x19\x08\x01 58910-L0100',
b'\xf1\x00DN ESC \x08 103\x19\x06\x01 58910-L1300',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \a 106 \a\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 104\x19\x08\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x06 106 \x07\x01 58910-L0100',
b'\xf1\x8758910-L0100\xf1\x00DN ESC \x07 104\x19\x08\x01 58910-L0100',
b'\xf1\x8758910-L0300\xf1\x00DN ESC \x03 100 \x08\x01 58910-L0300',
b'\xf1\x00DN ESC \x06 106 \x07\x01 58910-L0100',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81HM6M1_0a0_F00',
b'\xf1\x82DNBVN5GMCCXXXDCA',
b'\xf1\x82DNBWN5TMDCXXXG2E',
b'\xf1\x82DNCVN5GMCCXXXF0A',
b'\xf1\x82DNCVN5GMCCXXXG2B',
b'\xf1\x87391162M003',
b'\xf1\x87391162M013',
b'\xf1\x87391162M023',
b'HM6M1_0a0_F00',
b'HM6M1_0a0_G20',
b'HM6M2_0a0_BD0',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00DN8 MDPS C 1.00 1.01 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4DNAC101',
b'\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0010 4DNAC101',
b'\xf1\x00DN8 MDPS C 1.00 1.01 56310L0010\x00 4DNAC101',
b'\xf1\x00DN8 MDPS R 1.00 1.00 57700-L0000 4DNAP100',
b'\xf1\x87\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf1\x00DN8 MDPS C 1.00 1.01 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 4DNAC101',
b'\xf1\x8756310-L0010\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0010 4DNAC101',
b'\xf1\x8756310-L0210\xf1\x00DN8 MDPS C 1.00 1.01 56310-L0210 4DNAC101',
b'\xf1\x8756310-L1010\xf1\x00DN8 MDPS C 1.00 1.03 56310-L1010 4DNDC103',
b'\xf1\x8756310-L1030\xf1\x00DN8 MDPS C 1.00 1.03 56310-L1030 4DNDC103',
b'\xf1\x8756310L0010\x00\xf1\x00DN8 MDPS C 1.00 1.01 56310L0010\x00 4DNAC101',
b'\xf1\x8756310L0210\x00\xf1\x00DN8 MDPS C 1.00 1.01 56310L0210\x00 4DNAC101',
b'\xf1\x8757700-L0000\xf1\x00DN8 MDPS R 1.00 1.00 57700-L0000 4DNAP100',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00DN8 MFC AT KOR LHD 1.00 1.02 99211-L1000 190422',
b'\xf1\x00DN8 MFC AT RUS LHD 1.00 1.03 99211-L1000 190705',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.00 99211-L0000 190716',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.01 99211-L0000 191016',
b'\xf1\x00DN8 MFC AT USA LHD 1.00 1.03 99211-L0000 210603',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB1\xe3\xc10\xa1',
b'\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc',
b'\xf1\x00HT6TA260BLHT6TA800A1TDN8C20KS4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00HT6TA260BLHT6TA810A1TDN8M25GS0\x00\x00\x00\x00\x00\x00\xaa\x8c\xd9p',
b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x00HT6WA250BLHT6WA910A1SDN8G25NB1\x00\x00\x00\x00\x00\x00\x96\xa1\xf1\x92',
b'\xf1\x00HT6WA280BLHT6WAD10A1SDN8G25NB2\x00\x00\x00\x00\x00\x00\x08\xc9O:',
b'\xf1\x00T02601BL T02730A1 VDN8T25XXX730NS5\xf7_\x92\xf5',
b'\xf1\x87954A02N060\x00\x00\x00\x00\x00\xf1\x81T02730A1 \xf1\x00T02601BL T02730A1 VDN8T25XXX730NS5\xf7_\x92\xf5',
b'\xf1\x87SALDBA3510954GJ3ww\x87xUUuWx\x88\x87\x88\x87w\x88wvfwfc_\xf9\xff\x98wO\xffl\xe0\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3573534GJ3\x89\x98\x89\x88EUuWgwvwwwwww\x88\x87xTo\xfa\xff\x86f\x7f\xffo\x0e\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3601464GJ3\x88\x88\x88\x88ffvggwvwvw\x87gww\x87wvo\xfb\xff\x98\x88\x7f\xffjJ\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3753044GJ3UUeVff\x86hwwwwvwwgvfgfvo\xf9\xfffU_\xffC\xae\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA3873834GJ3fefVwuwWx\x88\x97\x88w\x88\x97xww\x87wU_\xfb\xff\x86f\x8f\xffN\x04\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA4525334GJ3\x89\x99\x99\x99fevWh\x88\x86\x88fwvgw\x88\x87xfo\xfa\xffuDo\xff\xd1>\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA4626804GJ3wwww\x88\x87\x88xx\x88\x87\x88wwgw\x88\x88\x98\x88\x95_\xf9\xffuDo\xff|\xe7\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA4803224GJ3wwwwwvwg\x88\x88\x98\x88wwww\x87\x88\x88xu\x9f\xfc\xff\x87f\x8f\xff\xea\xea\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA6347404GJ3wwwwff\x86hx\x88\x97\x88\x88\x88\x88\x88vfgf\x88?\xfc\xff\x86Uo\xff\xec/\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA6901634GJ3UUuWVeVUww\x87wwwwwvUge\x86/\xfb\xff\xbb\x99\x7f\xff]2\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALDBA7077724GJ3\x98\x88\x88\x88ww\x97ygwvwww\x87ww\x88\x87x\x87_\xfd\xff\xba\x99o\xff\x99\x01\xf1\x89HT6WA910A1\xf1\x82SDN8G25NB1\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SALFBA4195874GJ2EVugvf\x86hgwvwww\x87wgw\x86wc_\xfb\xff\x98\x88\x8f\xff\xe23\xf1\x81U903\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U903\x00\x00\x00\x00\x00\x00SDN8T16NB0z{\xd4v',
b'\xf1\x87SAMDBA8054504GJ3gw\x87xffvgffffwwwweUVUf?\xfc\xffvU_\xff\xddl\xf1\x89HT6WAD10A1\xf1\x82SDN8G25NB2\x00\x00\x00\x00\x00\x00',
b'\xf1\x87SAMFBA9708354GJ2wwwwVf\x86h\x88wx\x87xww\x87\x88\x88\x88\x88w/\xfa\xff\x97w\x8f\xff\x86\xa0\xf1\x81U913\x00\x00\x00\x00\x00\x00\xf1\x00bcsh8p54 U913\x00\x00\x00\x00\x00\x00SDN8T16NB2\n\xdd^\xbc',
],
},
CAR.SONATA_LF: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00LF__ SCC F-CUP 1.00 1.00 96401-C2200 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00LF ESC \f 11 \x17\x01\x13 58920-C2610',
b'\xf1\x00LF ESC \t 11 \x17\x01\x13 58920-C2610',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606D5051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606D5K51\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G1051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00LFF LKAS AT USA LHD 1.00 1.01 95740-C1000 E51',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87LAHSGN012918KF10\x98\x88x\x87\x88\x88x\x87\x88\x88\x98\x88\x87w\x88w\x88\x88\x98\x886o\xf6\xff\x98w\x7f\xff3\x00\xf1\x816W3B1051\x00\x00\xf1\x006W351_C2\x00\x006W3B1051\x00\x00TLF0T20NL2\x00\x00\x00\x00',
b'\xf1\x87LAHSGN012918KF10\x98\x88x\x87\x88\x88x\x87\x88\x88\x98\x88\x87w\x88w\x88\x88\x98\x886o\xf6\xff\x98w\x7f\xff3\x00\xf1\x816W3B1051\x00\x00\xf1\x006W351_C2\x00\x006W3B1051\x00\x00TLF0T20NL2H\r\xbdm',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5',
b'\xf1\x87\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x816T6B4051\x00\x00\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\x00\x00\x00\x00',
b'\xf1\x006T6H0_C2\x00\x006T6B4051\x00\x00TLF0G24NL1\xb0\x9f\xee\xf5',
],
},
CAR.SANTA_FE: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00TM__ SCC F-CUP 1.00 1.01 99110-S2000 ',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.02 99110-S2000 ',
b'\xf1\x00TM__ SCC F-CUP 1.00 1.03 99110-S2000 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00TM ESC \r 100\x18\x031 58910-S2650',
b'\xf1\x00TM ESC \r 103\x18\x11\x08 58910-S2650',
b'\xf1\x00TM ESC \r 104\x19\a\b 58910-S2650',
b'\xf1\x00TM ESC \x02 100\x18\x030 58910-S2600',
b'\xf1\x00TM ESC \x02 102\x18\x07\x01 58910-S2600',
b'\xf1\x00TM ESC \x02 103\x18\x11\x07 58910-S2600',
b'\xf1\x00TM ESC \x02 104\x19\x07\x07 58910-S2600',
b'\xf1\x00TM ESC \x03 103\x18\x11\x07 58910-S2600',
b'\xf1\x00TM ESC \x0c 103\x18\x11\x08 58910-S2650',
],
(Ecu.engine, 0x7e0, None): [
b'\xf1\x81606EA051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G1051\x00\x00\x00\x00\x00\x00\x00\x00',
b'\xf1\x81606G3051\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7d4, None): [
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8409',
b'\xf1\x00TM MDPS C 1.00 1.00 56340-S2000 8A12',
b'\xf1\x00TM MDPS C 1.00 1.01 56340-S2000 9129',
],
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00TM MFC AT USA LHD 1.00 1.00 99211-S2000 180409',
],
(Ecu.transmission, 0x7e1, None): [
b'\xf1\x87LBJSGA7082574HG0\x87www\x98\x88\x88\x88\x99\xaa\xb9\x9afw\x86gx\x99\xa7\x89co\xf8\xffvU_\xffR\xaf\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2T20NS1\x00\xa6\xe0\x91',
b'\xf1\x87LBKSGA0458404HG0vfvg\x87www\x89\x99\xa8\x99y\xaa\xa7\x9ax\x88\xa7\x88t_\xf9\xff\x86w\x8f\xff\x15x\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2T20NS1\x00\x00\x00\x00',
b'\xf1\x87LDJUEA6010814HG1\x87w\x87x\x86gvw\x88\x88\x98\x88gw\x86wx\x88\x97\x88\x85o\xf8\xff\x86f_\xff\xd37\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS0\xf8\x19\x92g',
b'\xf1\x87LDJUEA6458264HG1ww\x87x\x97x\x87\x88\x88\x99\x98\x89g\x88\x86xw\x88\x97x\x86o\xf7\xffvw\x8f\xff3\x9a\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS0\xf8\x19\x92g',
b'\xf1\x87LDKUEA2045844HG1wwww\x98\x88x\x87\x88\x88\xa8\x88x\x99\x97\x89x\x88\xa7\x88U\x7f\xf8\xffvfO\xffC\x1e\xf1\x816W3E0051\x00\x00\xf1\x006W351_C2\x00\x006W3E0051\x00\x00TTM4T20NS3\x00\x00\x00\x00',
b'\xf1\x87LDKUEA9993304HG1\x87www\x97x\x87\x88\x99\x99\xa9\x99x\x99\xa7\x89w\x88\x97x\x86_\xf7\xffwwO\xffl#\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4T20NS1R\x7f\x90\n',
b'\xf1\x87LDLUEA6061564HG1\xa9\x99\x89\x98\x87wwwx\x88\x97\x88x\x99\xa7\x89x\x99\xa7\x89sO\xf9\xffvU_\xff<\xde\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87LDLUEA6159884HG1\x88\x87hv\x99\x99y\x97\x89\xaa\xb8\x9ax\x99\x87\x89y\x99\xb7\x99\xa7?\xf7\xff\x97wo\xff\xf3\x05\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00',
b'\xf1\x87LDLUEA6852664HG1\x97wWu\x97www\x89\xaa\xc8\x9ax\x99\x97\x89x\x99\xa7\x89SO\xf7\xff\xa8\x88\x7f\xff\x03z\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87LDLUEA6898374HG1fevW\x87wwwx\x88\x97\x88h\x88\x96\x88x\x88\xa7\x88ao\xf9\xff\x98\x99\x7f\xffD\xe2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS5\x00\x00\x00\x00',
b'\xf1\x87LDLUEA6898374HG1fevW\x87wwwx\x88\x97\x88h\x88\x96\x88x\x88\xa7\x88ao\xf9\xff\x98\x99\x7f\xffD\xe2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4T20NS50\xcb\xc3\xed',
b'\xf1\x87SBJWAA5842214GG0\x88\x87\x88xww\x87x\x89\x99\xa8\x99\x88\x99\x98\x89w\x88\x87xw_\xfa\xfffU_\xff\xd1\x8d\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA5890864GG0\xa9\x99\x89\x98\x98\x87\x98y\x89\x99\xa8\x99w\x88\x87xww\x87wvo\xfb\xffuD_\xff\x9f\xb5\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x00\x00\x00\x00',
b'\xf1\x87SBJWAA6562474GG0ffvgeTeFx\x88\x97\x88ww\x87www\x87w\x84o\xfa\xff\x87fO\xff\xc2 \xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS1\x98{|\xe3',
b'\xf1\x87SBJWAA7780564GG0wvwgUUeVwwwwx\x88\x87\x88wwwwd_\xfc\xff\x86f\x7f\xff\xd7*\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS2F\x84<\xc0',
b'\xf1\x87SBJWAA8278284GG0ffvgUU\x85Xx\x88\x87\x88x\x88w\x88ww\x87w\x96o\xfd\xff\xa7U_\xff\xf2\xa0\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM2G24NS2F\x84<\xc0',
b'\xf1\x87SBLWAA4363244GG0wvwgwv\x87hgw\x86ww\x88\x87xww\x87wdo\xfb\xff\x86f\x7f\xff3$\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS6\x00\x00\x00\x00',
b'\xf1\x87SBLWAA6622844GG0wwwwff\x86hwwwwx\x88\x87\x88\x88\x88\x88\x88\x98?\xfd\xff\xa9\x88\x7f\xffn\xe5\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS7u\x1e{\x1c',
b'\xf1\x87SDJXAA7656854GG1DEtWUU\x85X\x88\x88\x98\x88w\x88\x87xx\x88\x87\x88\x96o\xfb\xff\x86f\x7f\xff.\xca\xf1\x816W3C2051\x00\x00\xf1\x006W351_C2\x00\x006W3C2051\x00\x00TTM4G24NS2\x00\x00\x00\x00',
b'\xf1\x87SDKXAA2443414GG1vfvgwv\x87h\x88\x88\x88\x88ww\x87wwwww\x99_\xfc\xffvD?\xffl\xd2\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM4G24NS6\x00\x00\x00\x00',
b'\xf1\x87SBLWAA4899564GG0VfvgUU\x85Xx\x88\x87\x88vfgf\x87wxwvO\xfb\xff\x97f\xb1\xffSB\xf1\x816W3E1051\x00\x00\xf1\x006W351_C2\x00\x006W3E1051\x00\x00TTM2G24NS7\x00\x00\x00\x00',
],
},
CAR.SANTA_FE_2022: {
(Ecu.fwdRadar, 0x7d0, None): [
b'\xf1\x00TM__ SCC F-CUP 1.00 1.00 99110-S1500 ',
b'\xf1\x8799110S1500\xf1\x00TM__ SCC F-CUP 1.00 1.00 99110-S1500 ',
],
(Ecu.esp, 0x7d1, None): [
b'\xf1\x00TM ESC \x02 101 \x08\x04 58910-S2GA0',
b'\xf1\x00TM ESC \x03 101 \x08\x02 58910-S2DA0',
b'\xf1\x8758910-S2DA0\xf1\x00TM ESC \x03 101 \x08\x02 58910-S2DA0',
b'\xf1\x8758910-S2GA0\xf1\x00TM ESC \x02 101 \x08\x04 58910-S2GA0',
],
(Ecu.engine, 0x7e0, | |
0 ],
[ "LD C, x", 0xe, 1 ],
[ "LD D, A", 0x57, 0 ],
# [ "LDD A, [HL]", 0x3a, 0 ],
[ "LD D, B", 0x50, 0 ],
[ "LD D, C", 0x51, 0 ],
[ "LD D, D", 0x52, 0 ],
[ "LD D, E", 0x53, 0 ],
[ "LD [DE], A", 0x12, 0 ],
[ "LD D, H", 0x54, 0 ],
[ "LD D, [HL]", 0x56, 0 ],
# [ "LDD [HL], A", 0x32, 0 ],
[ "LD D, L", 0x55, 0 ],
[ "LD D, x", 0x16, 1 ],
[ "LD E, A", 0x5f, 0 ],
[ "LD E, B", 0x58, 0 ],
[ "LD E, C", 0x59, 0 ],
[ "LD E, D", 0x5a, 0 ],
[ "LD E, E", 0x5b, 0 ],
[ "LD E, H", 0x5c, 0 ],
[ "LD E, [HL]", 0x5e, 0 ],
[ "LD E, L", 0x5d, 0 ],
[ "LD E, x", 0x1e, 1 ],
[ "LD [$FF00+C], A", 0xe2, 0 ],
[ "LD [$FF00+x], A", 0xe0, 1 ],
# [ "LDH [x], A", 0xe0, 1 ],
[ "LD H, A", 0x67, 0 ],
[ "LD H, B", 0x60, 0 ],
[ "LD H, C", 0x61, 0 ],
[ "LD H, D", 0x62, 0 ],
[ "LD H, E", 0x63, 0 ],
[ "LD H, H", 0x64, 0 ],
[ "LD H, [HL]", 0x66, 0 ],
[ "LD H, L", 0x65, 0 ],
# [ "LD [HL+], A", 0x22, 0 ],
# [ "LD [HL-], A", 0x32, 0 ],
[ "LD [HL], A", 0x77, 0 ],
[ "LD [HL], B", 0x70, 0 ],
[ "LD [HL], C", 0x71, 0 ],
[ "LD [HL], D", 0x72, 0 ],
[ "LD [HLD], A", 0x32, 0 ],
[ "LD [HL], E", 0x73, 0 ],
[ "LD [HL], H", 0x74, 0 ],
[ "LD [HLI], A", 0x22, 0 ],
[ "LD [HL], L", 0x75, 0 ],
[ "LD HL, SP+x", 0xf8, 1 ],
[ "LD [HL], x", 0x36, 1 ],
[ "LD H, x", 0x26, 1 ],
# [ "LDI A, [HL]", 0x2a, 0 ],
# [ "LDI [HL], A", 0x22, 0 ],
[ "LD L, A", 0x6f, 0 ],
[ "LD L, B", 0x68, 0 ],
[ "LD L, C", 0x69, 0 ],
[ "LD L, D", 0x6a, 0 ],
[ "LD L, E", 0x6b, 0 ],
[ "LD L, H", 0x6c, 0 ],
[ "LD L, [HL]", 0x6e, 0 ],
[ "LD L, L", 0x6d, 0 ],
[ "LD L, x", 0x2e, 1 ],
# [ "LD PC, HL", 0xe9, 0 ], #prefer jp [hl]
[ "LD SP, HL", 0xf9, 0 ],
[ "LD BC, ?", 0x1, 2 ],
[ "LD DE, ?", 0x11, 2 ],
[ "LD HL, ?", 0x21, 2 ],
[ "LD SP, ?", 0x31, 2 ],
# [ "LD [?], SP", 0x8, 2 ],
[ "LD [?], A", 0xea, 2 ],
[ "NOP", 0x0, 0 ],
[ "OR A", 0xb7, 0 ],
[ "OR B", 0xb0, 0 ],
[ "OR C", 0xb1, 0 ],
[ "OR D", 0xb2, 0 ],
[ "OR E", 0xb3, 0 ],
[ "OR H", 0xb4, 0 ],
[ "OR [HL]", 0xb6, 0 ],
[ "OR L", 0xb5, 0 ],
[ "OR x", 0xf6, 1 ],
[ "POP AF", 0xf1, 0 ],
[ "POP BC", 0xc1, 0 ],
[ "POP DE", 0xd1, 0 ],
[ "POP HL", 0xe1, 0 ],
[ "PUSH AF", 0xf5, 0 ],
[ "PUSH BC", 0xc5, 0 ],
[ "PUSH DE", 0xd5, 0 ],
[ "PUSH HL", 0xe5, 0 ],
[ "RES 0, A", 0x87cb, 3 ],
[ "RES 0, B", 0x80cb, 3 ],
[ "RES 0, C", 0x81cb, 3 ],
[ "RES 0, D", 0x82cb, 3 ],
[ "RES 0, E", 0x83cb, 3 ],
[ "RES 0, H", 0x84cb, 3 ],
[ "RES 0, [HL]", 0x86cb, 3 ],
[ "RES 0, L", 0x85cb, 3 ],
[ "RES 1, A", 0x8fcb, 3 ],
[ "RES 1, B", 0x88cb, 3 ],
[ "RES 1, C", 0x89cb, 3 ],
[ "RES 1, D", 0x8acb, 3 ],
[ "RES 1, E", 0x8bcb, 3 ],
[ "RES 1, H", 0x8ccb, 3 ],
[ "RES 1, [HL]", 0x8ecb, 3 ],
[ "RES 1, L", 0x8dcb, 3 ],
[ "RES 2, A", 0x97cb, 3 ],
[ "RES 2, B", 0x90cb, 3 ],
[ "RES 2, C", 0x91cb, 3 ],
[ "RES 2, D", 0x92cb, 3 ],
[ "RES 2, E", 0x93cb, 3 ],
[ "RES 2, H", 0x94cb, 3 ],
[ "RES 2, [HL]", 0x96cb, 3 ],
[ "RES 2, L", 0x95cb, 3 ],
[ "RES 3, A", 0x9fcb, 3 ],
[ "RES 3, B", 0x98cb, 3 ],
[ "RES 3, C", 0x99cb, 3 ],
[ "RES 3, D", 0x9acb, 3 ],
[ "RES 3, E", 0x9bcb, 3 ],
[ "RES 3, H", 0x9ccb, 3 ],
[ "RES 3, [HL]", 0x9ecb, 3 ],
[ "RES 3, L", 0x9dcb, 3 ],
[ "RES 4, A", 0xa7cb, 3 ],
[ "RES 4, B", 0xa0cb, 3 ],
[ "RES 4, C", 0xa1cb, 3 ],
[ "RES 4, D", 0xa2cb, 3 ],
[ "RES 4, E", 0xa3cb, 3 ],
[ "RES 4, H", 0xa4cb, 3 ],
[ "RES 4, [HL]", 0xa6cb, 3 ],
[ "RES 4, L", 0xa5cb, 3 ],
[ "RES 5, A", 0xafcb, 3 ],
[ "RES 5, B", 0xa8cb, 3 ],
[ "RES 5, C", 0xa9cb, 3 ],
[ "RES 5, D", 0xaacb, 3 ],
[ "RES 5, E", 0xabcb, 3 ],
[ "RES 5, H", 0xaccb, 3 ],
[ "RES 5, [HL]", 0xaecb, 3 ],
[ "RES 5, L", 0xadcb, 3 ],
[ "RES 6, A", 0xb7cb, 3 ],
[ "RES 6, B", 0xb0cb, 3 ],
[ "RES 6, C", 0xb1cb, 3 ],
[ "RES 6, D", 0xb2cb, 3 ],
[ "RES 6, E", 0xb3cb, 3 ],
[ "RES 6, H", 0xb4cb, 3 ],
[ "RES 6, [HL]", 0xb6cb, 3 ],
[ "RES 6, L", 0xb5cb, 3 ],
[ "RES 7, A", 0xbfcb, 3 ],
[ "RES 7, B", 0xb8cb, 3 ],
[ "RES 7, C", 0xb9cb, 3 ],
[ "RES 7, D", 0xbacb, 3 ],
[ "RES 7, E", 0xbbcb, 3 ],
[ "RES 7, H", 0xbccb, 3 ],
[ "RES 7, [HL]", 0xbecb, 3 ],
[ "RES 7, L", 0xbdcb, 3 ],
[ "RETI", 0xd9, 0 ],
[ "RET C", 0xd8, 0 ],
[ "RET NC", 0xd0, 0 ],
[ "RET NZ", 0xc0, 0 ],
[ "RET Z", 0xc8, 0 ],
[ "RET", 0xc9, 0 ],
[ "RLA", 0x17, 0 ],
[ "RL A", 0x17cb, 3 ],
[ "RL B", 0x10cb, 3 ],
[ "RL C", 0x11cb, 3 ],
[ "RLCA", 0x7, 0 ],
[ "RLC A", 0x7cb, 3 ],
[ "RLC B", 0xcb, 3 ],
[ "RLC C", 0x1cb, 3 ],
[ "RLC D", 0x2cb, 3 ],
[ "RLC E", 0x3cb, 3 ],
[ "RLC H", 0x4cb, 3 ],
[ "RLC [HL]", 0x6cb, 3 ],
[ "RLC L", 0x5cb, 3 ],
[ "RL D", 0x12cb, 3 ],
[ "RL E", 0x13cb, 3 ],
[ "RL H", 0x14cb, 3 ],
[ "RL [HL]", 0x16cb, 3 ],
[ "RL L", 0x15cb, 3 ],
[ "RRA", 0x1f, 0 ],
[ "RR A", 0x1fcb, 3 ],
[ "RR B", 0x18cb, 3 ],
[ "RR C", 0x19cb, 3 ],
[ "RRCA", 0xf, 0 ],
[ "RRC A", 0xfcb, 3 ],
[ "RRC B", 0x8cb, 3 ],
[ "RRC C", 0x9cb, 3 ],
[ "RRC D", 0xacb, 3 ],
[ "RRC E", 0xbcb, 3 ],
[ "RRC H", 0xccb, 3 ],
[ "RRC [HL]", 0xecb, 3 ],
[ "RRC L", 0xdcb, 3 ],
[ "RR D", 0x1acb, 3 ],
[ "RR E", 0x1bcb, 3 ],
[ "RR H", 0x1ccb, 3 ],
[ "RR [HL]", 0x1ecb, 3 ],
[ "RR L", 0x1dcb, 3 ],
[ "RST $0", 0xc7, 0 ],
[ "RST $10", 0xd7, 0 ],
| |
# Taken from https://github.com/CompVis/taming-transformers
# pytorch_diffusion + derived encoder decoder
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from fast_pytorch_kmeans import KMeans
from torch import einsum
import torch.distributed as dist
from einops import rearrange
def get_timestep_embedding(timesteps, embedding_dim):
"""
Build sinusoidal timestep embeddings.
This matches the implementation in Denoising Diffusion Probabilistic Models
(originally from Fairseq / tensor2tensor), but differs slightly from the
description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0,1,0,0))
return emb
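# Example (illustrative sketch, not from the original file): the function maps
# a 1-D batch of timesteps to sinusoidal features, half sines and half cosines
# at geometrically spaced frequencies, exactly like transformer positional
# encodings:
#
#     t = torch.arange(4)                    # shape [4]
#     emb = get_timestep_embedding(t, 128)   # shape [4, 128]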
def nonlinearity(x):
# swish
return x*torch.sigmoid(x)
def Normalize(in_channels):
return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
class Upsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
if self.with_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
# no asymmetric padding in torch conv, must do it ourselves
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=3,
stride=2,
padding=0)
def forward(self, x):
if self.with_conv:
pad = (0,1,0,1)
x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
x = self.conv(x)
else:
x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
return x
class ResnetBlock(nn.Module):
def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, dropout):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.norm1 = Normalize(in_channels)
self.conv1 = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
self.norm2 = Normalize(out_channels)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = torch.nn.Conv2d(out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
self.conv_shortcut = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
else:
self.nin_shortcut = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x):
h = x
h = self.norm1(h)
h = nonlinearity(h)
h = self.conv1(h)
h = self.norm2(h)
h = nonlinearity(h)
h = self.dropout(h)
h = self.conv2(h)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
x = self.conv_shortcut(x)
else:
x = self.nin_shortcut(x)
return x+h
class AttnBlock(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.in_channels = in_channels
self.norm = Normalize(in_channels)
self.q = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.k = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.v = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.proj_out = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x):
h_ = x
h_ = self.norm(h_)
q = self.q(h_)
k = self.k(h_)
v = self.v(h_)
# compute attention
b,c,h,w = q.shape
q = q.reshape(b,c,h*w)
q = q.permute(0,2,1) # b,hw,c
k = k.reshape(b,c,h*w) # b,c,hw
w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
w_ = w_ * (int(c)**(-0.5))
w_ = torch.nn.functional.softmax(w_, dim=2)
# attend to values
v = v.reshape(b,c,h*w)
w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
h_ = h_.reshape(b,c,h,w)
h_ = self.proj_out(h_)
return x+h_
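# Example (illustrative sketch, not from the original file): AttnBlock is
# single-head self-attention over spatial positions with 1x1-conv projections
# and a residual connection, so input and output shapes match:
#
#     attn = AttnBlock(64)
#     x = torch.randn(2, 64, 16, 16)
#     attn(x).shape   # -> torch.Size([2, 64, 16, 16])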
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
class Encoder(nn.Module):
"""
Encoder of VQ-GAN to map input batch of images to latent space.
Dimension Transformations:
3x256x256 --Conv2d--> 32x256x256
for loop:
--ResBlock--> 64x256x256 --DownBlock--> 64x128x128
--ResBlock--> 128x128x128 --DownBlock--> 128x64x64
--ResBlock--> 256x64x64 --DownBlock--> 256x32x32
--ResBlock--> 512x32x32
--ResBlock--> 512x32x32
--NonLocalBlock--> 512x32x32
--ResBlock--> 512x32x32
--GroupNorm-->
--Swish-->
--Conv2d-> 256x32x32
"""
def __init__(self, in_channels=3, channels=[128, 128, 128, 256, 512, 512], attn_resolutions=[32], resolution=512, dropout=0.0, num_res_blocks=2, z_channels=256, **kwargs):
super(Encoder, self).__init__()
layers = [nn.Conv2d(in_channels, channels[0], 3, 1, 1)]
for i in range(len(channels) - 1):
in_channels = channels[i]
out_channels = channels[i + 1]
for j in range(num_res_blocks):
layers.append(ResnetBlock(in_channels=in_channels, out_channels=out_channels, dropout=0.0))
in_channels = out_channels
if resolution in attn_resolutions:
layers.append(AttnBlock(in_channels))
if i < len(channels) - 2:
layers.append(Downsample(channels[i + 1], with_conv=True))
resolution //= 2
layers.append(ResnetBlock(in_channels=channels[-1], out_channels=channels[-1], dropout=0.0))
layers.append(AttnBlock(channels[-1]))
layers.append(ResnetBlock(in_channels=channels[-1], out_channels=channels[-1], dropout=0.0))
layers.append(Normalize(channels[-1]))
layers.append(Swish())
layers.append(nn.Conv2d(channels[-1], z_channels, 3, 1, 1))
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
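# Example (illustrative sketch, not from the original file): with the default
# channel list the Encoder downsamples by 2 ** (len(channels) - 2) == 16, so a
# 256x256 RGB image becomes a z_channels x 16 x 16 latent (the docstring above
# describes one such configuration with different channel widths):
#
#     enc = Encoder(in_channels=3, resolution=256)
#     x = torch.randn(1, 3, 256, 256)
#     enc(x).shape   # -> torch.Size([1, 256, 16, 16])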
# class Encoder(nn.Module):
# def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
# attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
# resolution, z_channels, double_z=True, **ignore_kwargs):
# super().__init__()
# self.ch = ch
# self.temb_ch = 0
# self.num_resolutions = len(ch_mult)
# self.num_res_blocks = num_res_blocks
# self.resolution = resolution
# self.in_channels = in_channels
#
# # downsampling
# self.conv_in = torch.nn.Conv2d(in_channels,
# self.ch,
# kernel_size=3,
# stride=1,
# padding=1)
#
# curr_res = resolution
# in_ch_mult = (1,)+tuple(ch_mult)
# self.down = nn.ModuleList()
# for i_level in range(self.num_resolutions):
# block = nn.ModuleList()
# attn = nn.ModuleList()
# block_in = ch*in_ch_mult[i_level]
# block_out = ch*ch_mult[i_level]
# for i_block in range(self.num_res_blocks):
# block.append(ResnetBlock(in_channels=block_in,
# out_channels=block_out,
# temb_channels=self.temb_ch,
# dropout=dropout))
# block_in = block_out
# if curr_res in attn_resolutions:
# attn.append(AttnBlock(block_in))
# down = nn.Module()
# down.block = block
# down.attn = attn
# if i_level != self.num_resolutions-1:
# down.downsample = Downsample(block_in, resamp_with_conv)
# curr_res = curr_res // 2
# self.down.append(down)
#
# # middle
# self.mid = nn.Module()
# self.mid.block_1 = ResnetBlock(in_channels=block_in,
# out_channels=block_in,
# temb_channels=self.temb_ch,
# dropout=dropout)
# self.mid.attn_1 = AttnBlock(block_in)
# self.mid.block_2 = ResnetBlock(in_channels=block_in,
# out_channels=block_in,
# temb_channels=self.temb_ch,
# dropout=dropout)
#
# # end
# self.norm_out = Normalize(block_in)
# self.conv_out = torch.nn.Conv2d(block_in,
# 2*z_channels if double_z else z_channels,
# kernel_size=3,
# stride=1,
# padding=1)
#
#
# def forward(self, x):
# #assert x.shape[2] == x.shape[3] == self.resolution, "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution)
#
# # timestep embedding
# temb = None
#
# # downsampling
# hs = [self.conv_in(x)]
# for i_level in range(self.num_resolutions):
# for i_block in range(self.num_res_blocks):
# h = self.down[i_level].block[i_block](hs[-1], temb)
# if len(self.down[i_level].attn) > 0:
# h = self.down[i_level].attn[i_block](h)
# hs.append(h)
# if i_level != self.num_resolutions-1:
# hs.append(self.down[i_level].downsample(hs[-1]))
#
# # middle
# h = hs[-1]
# h = self.mid.block_1(h, temb)
# h = self.mid.attn_1(h)
# h = self.mid.block_2(h, temb)
#
# # end
# h = self.norm_out(h)
# h = nonlinearity(h)
# h = self.conv_out(h)
# return h
class Decoder(nn.Module):
def __init__(self, out_channels=3, channels=[128, 128, 128, 256, 512, 512], attn_resolutions=[32], resolution=512, dropout=0.0, num_res_blocks=2, z_channels=256, **kwargs):
super(Decoder, self).__init__()
ch_mult = channels[1:]
num_resolutions = len(ch_mult)
block_in = ch_mult[num_resolutions - 1]
curr_res = resolution// 2 ** (num_resolutions - 1)
layers = [nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1),
ResnetBlock(in_channels=block_in, out_channels=block_in, dropout=0.0),
AttnBlock(block_in),
ResnetBlock(in_channels=block_in, out_channels=block_in, dropout=0.0)
]
for i in reversed(range(num_resolutions)):
block_out = ch_mult[i]
for i_block in range(num_res_blocks+1):
layers.append(ResnetBlock(in_channels=block_in, out_channels=block_out, dropout=0.0))
block_in = block_out
if curr_res in attn_resolutions:
layers.append(AttnBlock(block_in))
if i > 0:
layers.append(Upsample(block_in, with_conv=True))
curr_res = curr_res * 2
layers.append(Normalize(block_in))
layers.append(Swish())
layers.append(nn.Conv2d(block_in, out_channels, kernel_size=3, stride=1, padding=1))
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
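# Example (illustrative sketch, not from the original file): the Decoder
# mirrors the Encoder and upsamples by the same factor of
# 2 ** (len(channels) - 2), mapping a latent back to image space:
#
#     dec = Decoder(out_channels=3, resolution=256)
#     z = torch.randn(1, 256, 16, 16)
#     dec(z).shape   # -> torch.Size([1, 3, 256, 256])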
# class Decoder(nn.Module):
# def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
# attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
# resolution, z_channels, **ignorekwargs):
# super().__init__()
# self.temb_ch = 0
# self.num_resolutions = len(ch_mult)
# self.num_res_blocks = num_res_blocks
# self.resolution = resolution
# self.in_channels = in_channels
#
# block_in = ch*ch_mult[self.num_resolutions-1]
# curr_res = resolution // 2**(self.num_resolutions-1)
# self.z_shape = (1,z_channels,curr_res,curr_res)
#
# # z to block_in
# self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)
#
# # middle
# self.mid = nn.Module()
# self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout)
# self.mid.attn_1 = AttnBlock(block_in)
# self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout)
#
# # upsampling
# self.up = nn.ModuleList()
# for i_level in reversed(range(self.num_resolutions)):
# block = nn.ModuleList()
# attn = nn.ModuleList()
# block_out = ch*ch_mult[i_level]
# for i_block in range(self.num_res_blocks+1):
# block.append(ResnetBlock(in_channels=block_in,
# out_channels=block_out,
# temb_channels=self.temb_ch,
# dropout=dropout))
# block_in = block_out
# if curr_res in attn_resolutions:
# attn.append(AttnBlock(block_in))
# up = nn.Module()
# up.block = block
# up.attn = attn
# if i_level != 0:
# up.upsample = Upsample(block_in, resamp_with_conv)
# curr_res = curr_res * 2
# self.up.insert(0, up) # prepend to get consistent order
#
# # end
# self.norm_out = Normalize(block_in)
# self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)
#
# def forward(self, z):
# self.last_z_shape = z.shape
#
# # timestep embedding
# temb = None
#
# # z to block_in
# h = self.conv_in(z)
#
# # middle
# h = self.mid.block_1(h, temb)
# h = self.mid.attn_1(h)
# h = self.mid.block_2(h, temb)
#
# # upsampling
# for i_level in reversed(range(self.num_resolutions)):
# for i_block in range(self.num_res_blocks+1):
# h = self.up[i_level].block[i_block](h, temb)
# if len(self.up[i_level].attn) > 0:
# h = self.up[i_level].attn[i_block](h)
# if i_level != 0:
# h = self.up[i_level].upsample(h)
#
# h = self.norm_out(h)
# h = nonlinearity(h)
# h = self.conv_out(h)
# return h
class Codebook(nn.Module):
"""
Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
avoids costly matrix multiplications and allows for post-hoc remapping of indices.
"""
def __init__(self, codebook_size, codebook_dim, beta, init_steps=2000, reservoir_size=2e5):
super().__init__()
self.codebook_size = codebook_size
self.codebook_dim = codebook_dim
self.beta = beta
self.embedding = nn.Embedding(self.codebook_size, self.codebook_dim)
self.embedding.weight.data.uniform_(-1.0 / self.codebook_size, 1.0 / self.codebook_size)
self.q_start_collect, self.q_init, self.q_re_end, self.q_re_step = init_steps, init_steps * 3, init_steps * 30, init_steps // 2
self.q_counter = 0
self.reservoir_size = int(reservoir_size)
self.reservoir = None
def forward(self, z):
z = rearrange(z, 'b c h w -> b h w c').contiguous()
batch_size = z.size(0)
z_flattened = z.view(-1, self.codebook_dim)
if self.training:
self.q_counter += 1
# x_flat = x.permute(0, 2, 3, 1).reshape(-1, z.shape(1))
if self.q_counter > self.q_start_collect:
z_new = z_flattened.clone().detach().view(batch_size, -1, self.codebook_dim)
z_new = z_new[:, torch.randperm(z_new.size(1))][:, :10].reshape(-1, | |
not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import re
from falcon import HTTP_METHODS
def get_default_logger(level=None):
logger = logging.getLogger("falcon_cors")
logger.setLevel(logging.INFO)
logger.propagate = False
if not logger.handlers:
handler = logging.StreamHandler()
logger.addHandler(handler)
return logger
class CORSMiddleware:
"""This is the middleware that applies a CORS object to requests.
Args:
cors (CORS, required): An instance of :py:class:`~falcon.cors.CORS`.
default_enabled (bool, optional): Whether CORS processing should
take place for every resource. Default ``True``.
"""
def __init__(self, cors, default_enabled=True):
self.cors = cors
self.default_enabled = default_enabled
def process_resource(self, req, resp, resource, *args):
if not getattr(resource, 'cors_enabled', self.default_enabled):
return
cors = getattr(resource, 'cors', self.cors)
cors.process(req, resp, resource)
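# Example (illustrative sketch, not from the original file): typical wiring,
# assuming falcon 2.x where the application class is falcon.API (later falcon
# releases rename it to falcon.App but accept the same middleware list):
#
#     import falcon
#     cors = CORS(allow_all_origins=True, allow_all_headers=True, allow_all_methods=True)
#     app = falcon.API(middleware=[CORSMiddleware(cors)])
#
# A resource can then opt out by setting cors_enabled = False, or override the
# global policy by assigning its own CORS instance to a `cors` attribute;
# process_resource() above checks both attributes.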
class CORS(object):
"""
Initialize a CORS object, passing in configuration options.
All of the configuration settings are optional, however if none
of them are specified the default configuration will simply
deny all CORS requests. You can pass this to
:py:class:`~falcon.api.API` for a global configuration.
After enabling globally, you can override the settings for a
particular resource by setting the 'cors' attribute on it to
an instance of this class.
Args:
logger(:py:meth:`logging.Logger`, optional):
Specifies the logger to use. A basic logger and StreamHandler
will be configured for you if none is provided.
allow_all_origins(bool, optional): Specifies whether CORS
should allow requests from all origins. Default is ``False``.
allow_origins_list(list, optional): A list of
origins that are allowed to make CORS requests. Default is empty.
allow_origins_regex(str, optional): A string containing
a Python regular expression that matches origins which
are allowed to make CORS requests. Default is ``None``.
allow_all_headers(bool, optional): If ``True``, when the server is
responding to a preflight request it will approve any headers
requested by the client via the Access-Control-Request-Headers
header, setting each requested header in the
value of the Access-Control-Allow-Headers header in the response.
Default is ``False``.
allow_headers_list(list, optional): A list of headers which are
allowed values for the Access-Control-Allow-Headers header
in response to a preflight request. When the server is
responding to a preflight request, it will check each header
requested by the client in the Access-Control-Request-Headers
header to see if it exists in this list. If it does, it
will be included in the Access-Control-Allow-Headers header
in the response to the preflight request.
Default is empty.
allow_headers_regex(str, optional): A string containing a Python
regular expression that matches headers that should be
allowed in response to a preflight request. If this is set,
when a preflight request is received by the server, it will
try to match each header requested by the client via the
Access-Control-Request-Headers header of the request. If
the requested header is matched by this regex, it will be
included in the value of the Access-Control-Allow-Headers
header of the response.
expose_headers_list(list, optional): A list of headers that
should be sent as values to the Access-Control-Expose-Headers
header in response to simple or actual requests.
allow_all_methods(bool, optional): Specifies whether all methods
are allowed via CORS requests. Default is ``False``.
allow_methods_list(list, optional): A list of methods which are
allowed via CORS requests. These should be values from
``falcon.HTTP_METHODS``, which are strings like 'GET' and 'PATCH'.
Default is empty.
allow_credentials_all_origins(bool, optional): Whether or not the
Access-Control-Allow-Credentials header should be set to True
on all responses. Default is ``False``.
allow_credentials_origins_list(list, optional): A list of
origins for which the Access-Control-Allow-Credentials
header should be set to True and included with all
responses. Default is empty.
allow_credentials_origins_regex(string, optional): A string
containing a Python regular expression matching origins
for which the Access-Control-Allow-Credentials header
should be set to True and included in all responses.
Default is ``None``.
max_age(int, optional): If set to an integer, this value
will be used as the value of the Access-Control-Max-Age
header in response to preflight requests. This is
the maximum amount of time, in seconds, that a client may cache
responses to preflight requests.
Default is ``None`` (no header sent).
Note:
The arguments above are inclusive, meaning a header, origin, or method
will only be disallowed if it doesn't match ANY specification.
First the allow_all directive is checked, then the list directive,
then the regex directive if applicable, then list by method if applicable,
and lastly regex by method if applicable. For instance, this means if
you specify 'Auth-Key' in allow_headers_list, it will be allowed for all
methods regardless of the values in header_list_by_method.
Note:
Headers are converted to lower-case for you.
Methods are converted to upper-case for you.
Take note of this if you are writing regular expressions.
Note:
The allow_headers_* settings relate to the Access-Control-Allow-Headers
header which is only sent in response to pre-flight requests.
This is different from the Access-Control-Expose-Headers header which
is set via the expose_headers_list setting and is sent only in response
to basic or actual requests.
Warning:
Exercise caution when using the regex enabled settings. It is very
easy to misunderstand Python regex syntax and accidentally
introduce an unintentionally allowed origin or other vulnerability
into your application.
"""
def __init__(self, **cors_config):
default_cors_config = {
'logger': get_default_logger(),
'log_level': None,
'allow_all_origins': False,
'allow_origins_list': [],
'allow_origins_regex': None,
'allow_all_headers': False,
'allow_headers_list': [],
'allow_headers_regex': None,
'expose_headers_list': [],
'allow_all_methods': False,
'allow_methods_list': [],
'allow_credentials_all_origins': False,
'allow_credentials_origins_list': [],
'allow_credentials_origins_regex': None,
'max_age': None
}
for cors_setting, setting_value in default_cors_config.items():
cors_config.setdefault(cors_setting, setting_value)
unknown_settings = list(set(cors_config.keys()) -
set(default_cors_config.keys()))
if unknown_settings:
raise ValueError(
'Unknown CORS settings: {0}'.format(unknown_settings))
self.logger = cors_config["logger"]
if cors_config["log_level"] is not None:
level = logging.getLevelName(cors_config["log_level"])
self.logger.setLevel(level)
unknown_methods = list(set(
cors_config['allow_methods_list']) - set(HTTP_METHODS))
if unknown_methods:
raise ValueError(
'Unknown methods specified for '
'allow_methods_list: {0}'.format(unknown_methods))
self._compile_keys(
cors_config,
[
'allow_origins_regex', 'allow_headers_regex',
'allow_credentials_origins_regex'
])
cors_config['allow_methods_list'] = [
method.upper() for method in cors_config['allow_methods_list']
]
for header_list_key in ['allow_headers_list', 'expose_headers_list']:
cors_config[header_list_key] = [
header.lower() for header in cors_config[header_list_key]
]
# We need to detect whether we support credentials; if we do,
# we cannot set Access-Control-Allow-Origin to *
self.supports_credentials = False
for credentials_key in [
'allow_credentials_all_origins',
'allow_credentials_origins_list',
'allow_credentials_origins_regex'
]:
if cors_config[credentials_key]:
self.supports_credentials = True
self.logger.debug(
"supports_credentials: {0}".format(
self.supports_credentials
)
)
# Detect if we need to send 'Vary: Origin' header
# This needs to be set if any decisions about which headers to send
# are being made based on the Origin header the client sends
self.origins_vary | |
"""CUDA target independent of PyCUDA."""
from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2015 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
from pytools import memoize_method
from loopy.target.c import CFamilyTarget, CFamilyASTBuilder
from loopy.target.c.codegen.expression import ExpressionToCExpressionMapper
from loopy.diagnostic import LoopyError
from loopy.types import NumpyType
from loopy.kernel.data import AddressSpace
from pymbolic import var
from loopy.kernel.function_interface import ScalarCallable
# {{{ vector types
class vec: # noqa
pass
def _create_vector_types():
field_names = ["x", "y", "z", "w"]
import sys
if sys.maxsize <= 2**33:
long_dtype = np.int32
ulong_dtype = np.uint32
else:
long_dtype = np.int64
ulong_dtype = np.uint64
vec.types = {}
vec.names_and_dtypes = []
vec.type_to_scalar_and_count = {}
for base_name, base_type, counts in [
('char', np.int8, [1, 2, 3, 4]),
('uchar', np.uint8, [1, 2, 3, 4]),
('short', np.int16, [1, 2, 3, 4]),
('ushort', np.uint16, [1, 2, 3, 4]),
('int', np.int32, [1, 2, 3, 4]),
('uint', np.uint32, [1, 2, 3, 4]),
('long', long_dtype, [1, 2, 3, 4]),
('ulong', ulong_dtype, [1, 2, 3, 4]),
('longlong', np.int64, [1, 2]),
('ulonglong', np.uint64, [1, 2]),
('float', np.float32, [1, 2, 3, 4]),
('double', np.float64, [1, 2]),
]:
for count in counts:
name = "%s%d" % (base_name, count)
titles = field_names[:count]
names = ["s%d" % i for i in range(count)]
if len(titles) < len(names):
titles.extend((len(names)-len(titles))*[None])
try:
dtype = np.dtype(dict(
names=names,
formats=[base_type]*count,
titles=titles))
except NotImplementedError:
try:
dtype = np.dtype([((n, title), base_type)
for (n, title) in zip(names, titles)])
except TypeError:
dtype = np.dtype([(n, base_type) for (n, title)
in zip(names, titles)])
setattr(vec, name, dtype)
vec.names_and_dtypes.append((name, dtype))
vec.types[np.dtype(base_type), count] = dtype
vec.type_to_scalar_and_count[dtype] = np.dtype(base_type), count
_create_vector_types()
def _register_vector_types(dtype_registry):
for name, dtype in vec.names_and_dtypes:
dtype_registry.get_or_register_dtype(name, dtype)
# }}}
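# Illustrative check (an assumption about typical use, not exercised by this
# module): each generated vector type is a numpy structured dtype, so e.g.
# `vec.float4` has four float32 fields named s0..s3 (titled x, y, z, w), and
# the lookup tables built above round-trip between dtype and (scalar, count).
#
#   a = np.zeros(2, dtype=vec.float4)
#   a["s0"] = 1.0
#   assert vec.types[np.dtype(np.float32), 4] is vec.float4
#   assert vec.type_to_scalar_and_count[vec.float4] == (np.dtype(np.float32), 4)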
# {{{ function scoper
_CUDA_SPECIFIC_FUNCTIONS = {
"rsqrt": 1,
"atan2": 2,
}
class CudaCallable(ScalarCallable):
def cuda_with_types(self, arg_id_to_dtype, caller_kernel,
callables_table):
name = self.name
if name == "dot":
for id in arg_id_to_dtype:
if not -1 <= id <= 1:
raise LoopyError("%s can take only 2 arguments." % name)
if 0 not in arg_id_to_dtype or 1 not in arg_id_to_dtype or (
arg_id_to_dtype[0] is None or arg_id_to_dtype[1] is None):
# the types provided aren't mature enough to specialize the
# callable
return (
self.copy(arg_id_to_dtype=arg_id_to_dtype),
callables_table)
dtype = arg_id_to_dtype[0]
scalar_dtype, offset, field_name = dtype.numpy_dtype.fields["x"]
return (
self.copy(name_in_target=name, arg_id_to_dtype={-1:
NumpyType(scalar_dtype),
0: dtype, 1: dtype}),
callables_table)
if name in _CUDA_SPECIFIC_FUNCTIONS:
num_args = _CUDA_SPECIFIC_FUNCTIONS[name]
for id in arg_id_to_dtype:
if not -1 <= id < num_args:
raise LoopyError("%s can take only %d arguments." % (name,
num_args))
for i in range(num_args):
if i not in arg_id_to_dtype or arg_id_to_dtype[i] is None:
# the types provided aren't mature enough to specialize the
# callable
return (
self.copy(arg_id_to_dtype=arg_id_to_dtype),
callables_table)
dtype = np.find_common_type(
[], [dtype.numpy_dtype for id, dtype in
arg_id_to_dtype.items() if id >= 0])
if dtype.kind == "c":
raise LoopyError("%s does not support complex numbers"
% name)
updated_arg_id_to_dtype = dict((id, NumpyType(dtype)) for id in range(-1,
num_args))
return (
self.copy(name_in_target=name,
arg_id_to_dtype=updated_arg_id_to_dtype),
callables_table)
return (
self.copy(arg_id_to_dtype=arg_id_to_dtype),
callables_table)
def scope_cuda_functions(target, identifier):
if identifier in set(["dot"]) | set(
_CUDA_SPECIFIC_FUNCTIONS):
return CudaCallable(name=identifier)
return None
# }}}
# {{{ expression mapper
class ExpressionToCudaCExpressionMapper(ExpressionToCExpressionMapper):
_GRID_AXES = "xyz"
@staticmethod
def _get_index_ctype(kernel):
if kernel.index_dtype.numpy_dtype == np.int32:
return "int32_t"
elif kernel.index_dtype.numpy_dtype == np.int64:
return "int64_t"
else:
raise LoopyError("unexpected index type")
def map_group_hw_index(self, expr, type_context):
return var("((%s) blockIdx.%s)" % (
self._get_index_ctype(self.kernel),
self._GRID_AXES[expr.axis]))
def map_local_hw_index(self, expr, type_context):
return var("((%s) threadIdx.%s)" % (
self._get_index_ctype(self.kernel),
self._GRID_AXES[expr.axis]))
# }}}
# {{{ target
class CudaTarget(CFamilyTarget):
"""A target for Nvidia's CUDA GPU programming language."""
def __init__(self, extern_c=True):
"""
:arg extern_c: If *True*, declare kernels using "extern C" to
avoid name mangling.
"""
self.extern_c = extern_c
super(CudaTarget, self).__init__()
def split_kernel_at_global_barriers(self):
return True
def get_device_ast_builder(self):
return CUDACASTBuilder(self)
# {{{ types
@memoize_method
def get_dtype_registry(self):
from loopy.target.c.compyte.dtypes import (DTypeRegistry,
fill_registry_with_opencl_c_types)
result = DTypeRegistry()
fill_registry_with_opencl_c_types(result)
# no complex number support--needs PyOpenCLTarget
_register_vector_types(result)
return result
def is_vector_dtype(self, dtype):
return (isinstance(dtype, NumpyType)
and dtype.numpy_dtype in list(vec.types.values()))
def vector_dtype(self, base, count):
return NumpyType(
vec.types[base.numpy_dtype, count],
target=self)
# }}}
# }}}
# {{{ preamble generator
def cuda_preamble_generator(preamble_info):
from loopy.types import AtomicNumpyType
seen_64_bit_atomics = any(
isinstance(dtype, AtomicNumpyType) and dtype.numpy_dtype.itemsize == 8
for dtype in preamble_info.seen_atomic_dtypes)
if seen_64_bit_atomics:
# Source:
# docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions
yield ("00_enable_64bit_atomics", """
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
""")
# }}}
# {{{ ast builder
class CUDACASTBuilder(CFamilyASTBuilder):
# {{{ library
def function_id_in_knl_callable_mapper(self):
return [scope_cuda_functions] + (
super(CUDACASTBuilder, self).function_id_in_knl_callable_mapper())
# }}}
# {{{ top-level codegen
def get_function_declaration(self, codegen_state, codegen_result,
schedule_index):
fdecl = super(CUDACASTBuilder, self).get_function_declaration(
codegen_state, codegen_result, schedule_index)
from loopy.target.c import FunctionDeclarationWrapper
assert isinstance(fdecl, FunctionDeclarationWrapper)
fdecl = fdecl.subdecl
from cgen.cuda import CudaGlobal, CudaLaunchBounds
fdecl = CudaGlobal(fdecl)
if self.target.extern_c:
from cgen import Extern
fdecl = Extern("C", fdecl)
from loopy.schedule import get_insn_ids_for_block_at
_, local_grid_size = \
codegen_state.kernel.get_grid_sizes_for_insn_ids_as_exprs(
get_insn_ids_for_block_at(
codegen_state.kernel.schedule, schedule_index),
codegen_state.callables_table)
from loopy.symbolic import get_dependencies
if not get_dependencies(local_grid_size):
# Sizes can't have parameter dependencies if they are
# to be used in static thread block size.
from pytools import product
nthreads = product(local_grid_size)
fdecl = CudaLaunchBounds(nthreads, fdecl)
return FunctionDeclarationWrapper(fdecl)
def preamble_generators(self):
return (
super(CUDACASTBuilder, self).preamble_generators() + [
cuda_preamble_generator])
# }}}
# {{{ code generation guts
def get_expression_to_c_expression_mapper(self, codegen_state):
return ExpressionToCudaCExpressionMapper(codegen_state)
_VEC_AXES = "xyzw"
def add_vector_access(self, access_expr, index):
return access_expr.attr(self._VEC_AXES[index])
def emit_barrier(self, synchronization_kind, mem_kind, comment):
"""
:arg synchronization_kind: ``"local"`` or ``"global"``
:arg mem_kind: unused
:return: a :class:`loopy.codegen.GeneratedInstruction`.
"""
if synchronization_kind == "local":
if comment:
comment = " /* %s */" % comment
from cgen import Statement
return Statement("__syncthreads()%s" % comment)
elif synchronization_kind == "global":
raise LoopyError("CUDA does not have global barriers")
else:
raise LoopyError("unknown barrier kind")
def wrap_temporary_decl(self, decl, scope):
if scope == AddressSpace.LOCAL:
from cgen.cuda import CudaShared
return CudaShared(decl)
elif scope == AddressSpace.PRIVATE:
return decl
else:
raise ValueError("unexpected temporary variable scope: %s"
% scope)
def wrap_global_constant(self, decl):
from cgen.cuda import CudaConstant
return CudaConstant(decl)
def get_array_arg_decl(self, name, mem_address_space, shape, dtype, is_written):
from loopy.target.c import POD # uses the correct complex type
from cgen import Const
from cgen.cuda import CudaRestrictPointer
arg_decl = CudaRestrictPointer(POD(self, dtype, name))
if not is_written:
arg_decl = Const(arg_decl)
return arg_decl
def get_global_arg_decl(self, name, shape, dtype, is_written):
from warnings import warn
warn("get_global_arg_decl is deprecated use get_array_arg_decl "
"instead.", DeprecationWarning, stacklevel=2)
return self.get_array_arg_decl(name, AddressSpace.GLOBAL, shape,
dtype, is_written)
def get_image_arg_decl(self, name, shape, num_target_axes, dtype, is_written):
raise NotImplementedError("not yet: texture arguments in CUDA")
def get_constant_arg_decl(self, name, shape, dtype, is_written):
from loopy.target.c import POD # uses the correct complex type
from cgen import RestrictPointer, Const
from cgen.cuda import CudaConstant
arg_decl = RestrictPointer(POD(self, dtype, name))
if not is_written:
arg_decl = Const(arg_decl)
return CudaConstant(arg_decl)
# {{{ code generation for atomic update
def emit_atomic_update(self, codegen_state, lhs_atomicity, lhs_var,
lhs_expr, rhs_expr, lhs_dtype, rhs_type_context):
from pymbolic.primitives import Sum
from cgen import Statement
from pymbolic.mapper.stringifier import PREC_NONE
if isinstance(lhs_dtype, NumpyType) and lhs_dtype.numpy_dtype in [
np.int32, np.int64, np.float32, np.float64]:
# atomicAdd
if isinstance(rhs_expr, Sum):
ecm = self.get_expression_to_code_mapper(codegen_state)
new_rhs_expr = Sum(tuple(c for c in rhs_expr.children
if c != lhs_expr))
lhs_expr_code = ecm(lhs_expr)
rhs_expr_code = ecm(new_rhs_expr)
return Statement("atomicAdd(&{0}, {1})".format(
lhs_expr_code, rhs_expr_code))
else:
from cgen import Block, DoWhile, Assign
from loopy.target.c import POD
old_val_var = codegen_state.var_name_generator("loopy_old_val")
new_val_var = codegen_state.var_name_generator("loopy_new_val")
from loopy.kernel.data import TemporaryVariable
ecm = codegen_state.expression_to_code_mapper.with_assignments(
{
old_val_var: TemporaryVariable(old_val_var, lhs_dtype),
new_val_var: TemporaryVariable(new_val_var, lhs_dtype),
})
lhs_expr_code = ecm(lhs_expr, prec=PREC_NONE, type_context=None)
from pymbolic.mapper.substitutor import | |
PIT_ID CHAR(8) COMMENT 'Pitcher',
PIT_HAND_CD CHAR(1) COMMENT 'Pitcher hand',
RESP_PIT_ID CHAR(8) COMMENT 'Result pitcher',
RESP_PIT_HAND_CD CHAR(1) COMMENT 'Result pitcher hand',
POS2_FLD_ID CHAR(8) COMMENT 'Catcher',
POS3_FLD_ID CHAR(8) COMMENT 'First baseman',
POS4_FLD_ID CHAR(8) COMMENT 'Second baseman',
POS5_FLD_ID CHAR(8) COMMENT 'Third baseman',
POS6_FLD_ID CHAR(8) COMMENT 'Shortstop',
POS7_FLD_ID CHAR(8) COMMENT 'Left fielder',
POS8_FLD_ID CHAR(8) COMMENT 'Center fielder',
POS9_FLD_ID CHAR(8) COMMENT 'Right fielder',
BASE1_RUN_ID CHAR(8) COMMENT 'Runner on first',
BASE2_RUN_ID CHAR(8) COMMENT 'Runner on second',
BASE3_RUN_ID CHAR(8) COMMENT 'Runner on third',
EVENT_TX TEXT COMMENT 'Event text',
LEADOFF_FL CHAR(1) COMMENT 'Leadoff flag',
PH_FL CHAR(1) COMMENT 'Pinch-hit flag',
BAT_FLD_CD TINYINT COMMENT 'Defensive position',
BAT_LINEUP_ID TINYINT COMMENT 'Lineup position',
EVENT_CD TINYINT COMMENT 'Event type',
BAT_EVENT_FL CHAR(1) COMMENT 'Batter event flag',
AB_FL CHAR(1) COMMENT 'Official time at bat flag',
H_CD TINYINT COMMENT 'Hit value',
SH_FL CHAR(1) COMMENT 'Sacrifice hit flag',
SF_FL CHAR(1) COMMENT 'Sacrifice fly flag',
EVENT_OUTS_CT TINYINT COMMENT 'Outs on play',
DP_FL CHAR(1) COMMENT 'Double play flag',
TP_FL CHAR(1) COMMENT 'Triple play flag',
RBI_CT TINYINT COMMENT 'RBI on play',
WP_FL CHAR(1) COMMENT 'Wild pitch flag',
PB_FL CHAR(1) COMMENT 'Passed ball flag',
FLD_CD TINYINT COMMENT 'Fielded by',
BATTEDBALL_CD CHAR(1) COMMENT 'Batted ball type',
BUNT_FL CHAR(1) COMMENT 'Bunt flag',
FOUL_FL CHAR(1) COMMENT 'Foul flag',
BATTEDBALL_LOC_TX TEXT COMMENT 'Hit location',
ERR_CT TINYINT COMMENT 'Number of errors',
ERR1_FLD_CD TINYINT COMMENT '1st error player',
ERR1_CD CHAR(1) COMMENT '1st error type',
ERR2_FLD_CD TINYINT COMMENT '2nd error player',
ERR2_CD CHAR(1) COMMENT '2nd error type',
ERR3_FLD_CD TINYINT COMMENT '3rd error player',
ERR3_CD CHAR(1) COMMENT '3rd error type',
BAT_DEST_ID TINYINT COMMENT 'Batter destination',
RUN1_DEST_ID TINYINT COMMENT 'Runner on first destination',
RUN2_DEST_ID TINYINT COMMENT 'Runner on second destination',
RUN3_DEST_ID TINYINT COMMENT 'Runner on third destination',
BAT_PLAY_TX TEXT COMMENT 'Play on batter',
RUN1_PLAY_TX TEXT COMMENT 'Play on runner on first',
RUN2_PLAY_TX TEXT COMMENT 'Play on runner on second',
RUN3_PLAY_TX TEXT COMMENT 'Play on runner on third',
RUN1_SB_FL CHAR(1) COMMENT 'Stolen base for runner on first',
RUN2_SB_FL CHAR(1) COMMENT 'Stolen base for runner on second',
RUN3_SB_FL CHAR(1) COMMENT 'Stolen base for runner on third',
RUN1_CS_FL CHAR(1) COMMENT 'Caught stealing for runner on first',
RUN2_CS_FL CHAR(1) COMMENT 'Caught stealing for runner on second',
RUN3_CS_FL CHAR(1) COMMENT 'Caught stealing for runner on third',
RUN1_PK_FL CHAR(1) COMMENT 'Pickoff of runner on first',
RUN2_PK_FL CHAR(1) COMMENT 'Pickoff of runner on second',
RUN3_PK_FL CHAR(1) COMMENT 'Pickoff of runner on third',
RUN1_RESP_PIT_ID CHAR(8) COMMENT 'Pitcher charged with runner on first',
RUN2_RESP_PIT_ID CHAR(8) COMMENT 'Pitcher charged with runner on second',
RUN3_RESP_PIT_ID CHAR(8) COMMENT 'Pitcher charged with runner on third',
GAME_NEW_FL CHAR(1) COMMENT 'New game flag',
GAME_END_FL CHAR(1) COMMENT 'End game flag',
PR_RUN1_FL CHAR(1) COMMENT 'Pinch-runner on first',
PR_RUN2_FL CHAR(1) COMMENT 'Pinch-runner on second',
PR_RUN3_FL CHAR(1) COMMENT 'Pinch-runner on third',
REMOVED_FOR_PR_RUN1_ID CHAR(8) COMMENT 'Runner removed for pinch-runner on first',
REMOVED_FOR_PR_RUN2_ID CHAR(8) COMMENT 'Runner removed for pinch-runner on second',
REMOVED_FOR_PR_RUN3_ID CHAR(8) COMMENT 'Runner removed for pinch-runner on third',
REMOVED_FOR_PH_BAT_ID CHAR(8) COMMENT 'Batter removed for pinch-hitter',
REMOVED_FOR_PH_BAT_FLD_CD TINYINT COMMENT 'Position of batter removed for pinch-hitter',
PO1_FLD_CD TINYINT COMMENT 'Fielder with first putout',
PO2_FLD_CD TINYINT COMMENT 'Fielder with second putout',
PO3_FLD_CD TINYINT COMMENT 'Fielder with third putout',
ASS1_FLD_CD TINYINT COMMENT 'Fielder with first assist',
ASS2_FLD_CD TINYINT COMMENT 'Fielder with second assist',
ASS3_FLD_CD TINYINT COMMENT 'Fielder with third assist',
ASS4_FLD_CD TINYINT COMMENT 'Fielder with fourth assist',
ASS5_FLD_CD TINYINT COMMENT 'Fielder with fifth assist',
EVENT_ID SMALLINT COMMENT 'Event number',
HOME_TEAM_ID CHAR(3) COMMENT 'home team id',
BAT_TEAM_ID CHAR(3) COMMENT 'batting team id',
FLD_TEAM_ID CHAR(3) COMMENT 'fielding team id',
BAT_LAST_ID TINYINT COMMENT 'half inning (differs from batting team if home team bats first)',
INN_NEW_FL CHAR(1) COMMENT 'start of half inning flag',
INN_END_FL CHAR(1) COMMENT 'end of half inning flag',
START_BAT_SCORE_CT SMALLINT COMMENT 'score for team on offense',
START_FLD_SCORE_CT SMALLINT COMMENT 'score for team on defense',
INN_RUNS_CT SMALLINT COMMENT 'runs scored in this half inning',
GAME_PA_CT SMALLINT COMMENT 'number of plate appearances in game for team on offense',
INN_PA_CT SMALLINT COMMENT 'number of plate appearances in inning for team on offense',
PA_NEW_FL CHAR(1) COMMENT 'start of plate appearance flag',
PA_TRUNC_FL CHAR(1) COMMENT 'truncated plate appearance flag',
START_BASES_CD TINYINT COMMENT 'base state at start of play',
END_BASES_CD TINYINT COMMENT 'base state at end of play',
BAT_START_FL CHAR(1) COMMENT 'batter is starter flag',
RESP_BAT_START_FL CHAR(1) COMMENT 'result batter is starter flag',
BAT_ON_DECK_ID CHAR(8) COMMENT 'ID of the batter on deck',
BAT_IN_HOLD_ID CHAR(8) COMMENT 'ID of the batter in the hold',
PIT_START_FL CHAR(1) COMMENT 'pitcher is starter flag',
RESP_PIT_START_FL CHAR(1) COMMENT 'result pitcher is starter flag',
RUN1_FLD_CD TINYINT COMMENT 'defensive position of runner on first',
RUN1_LINEUP_CD TINYINT COMMENT 'lineup position of runner on first',
RUN1_ORIGIN_EVENT_ID SMALLINT COMMENT 'event number on which runner on first reached base',
RUN2_FLD_CD TINYINT COMMENT 'defensive position of runner on second',
RUN2_LINEUP_CD TINYINT COMMENT 'lineup position of runner on second',
RUN2_ORIGIN_EVENT_ID SMALLINT COMMENT 'event number on which runner on second reached base',
RUN3_FLD_CD TINYINT COMMENT 'defensive position of runner on third',
RUN3_LINEUP_CD TINYINT COMMENT 'lineup position of runner on third',
RUN3_ORIGIN_EVENT_ID SMALLINT COMMENT 'event number on which runner on third reached base',
RUN1_RESP_CAT_ID CHAR(8) COMMENT 'responsible catcher for runner on first',
RUN2_RESP_CAT_ID CHAR(8) COMMENT 'responsible catcher for runner on second',
RUN3_RESP_CAT_ID CHAR(8) COMMENT 'responsible catcher for runner on third',
PA_BALL_CT TINYINT COMMENT 'number of balls in plate appearance',
PA_CALLED_BALL_CT TINYINT COMMENT 'number of called balls in plate appearance',
PA_INTENT_BALL_CT TINYINT COMMENT 'number of intentional balls in plate appearance',
PA_PITCHOUT_BALL_CT TINYINT COMMENT 'number of pitchouts in plate appearance',
PA_HITBATTER_BALL_CT TINYINT COMMENT 'number of pitches hitting batter in plate appearance',
PA_OTHER_BALL_CT TINYINT COMMENT 'number of other balls in plate appearance',
PA_STRIKE_CT TINYINT COMMENT 'number of strikes in plate appearance',
PA_CALLED_STRIKE_CT TINYINT COMMENT 'number of called strikes in plate appearance',
PA_SWINGMISS_STRIKE_CT TINYINT COMMENT 'number of swinging strikes in plate appearance',
PA_FOUL_STRIKE_CT TINYINT COMMENT 'number of foul balls in plate appearance',
PA_INPLAY_STRIKE_CT TINYINT COMMENT 'number of balls in play in plate appearance',
PA_OTHER_STRIKE_CT TINYINT COMMENT 'number of other strikes in plate appearance',
EVENT_RUNS_CT TINYINT COMMENT 'number of runs on play',
FLD_ID CHAR(8) COMMENT 'id of player fielding batted ball',
BASE2_FORCE_FL CHAR(1) COMMENT 'force play at second flag',
BASE3_FORCE_FL CHAR(1) COMMENT 'force play at third flag',
BASE4_FORCE_FL CHAR(1) COMMENT 'force play at home flag',
BAT_SAFE_ERR_FL CHAR(1) COMMENT 'batter safe on error flag',
BAT_FATE_ID TINYINT COMMENT 'fate of batter (base ultimately advanced to)',
RUN1_FATE_ID TINYINT COMMENT 'fate of runner on first',
RUN2_FATE_ID TINYINT COMMENT 'fate of runner on second',
RUN3_FATE_ID TINYINT COMMENT 'fate of runner on third',
FATE_RUNS_CT SMALLINT COMMENT 'runs scored in half inning after this event',
ASS6_FLD_CD TINYINT COMMENT 'fielder with sixth assist',
ASS7_FLD_CD TINYINT COMMENT 'fielder with seventh assist',
ASS8_FLD_CD TINYINT COMMENT 'fielder with eighth assist',
ASS9_FLD_CD TINYINT COMMENT 'fielder with ninth assist',
ASS10_FLD_CD TINYINT COMMENT 'fielder with tenth assist',
UNKNOWN_OUT_EXC_FL CHAR(1) COMMENT 'unknown fielding credit flag',
UNCERTAIN_PLAY_EXC_FL CHAR(1) COMMENT 'uncertain play flag',
PRIMARY KEY (event_key)
) COMMENT \'Represents an event in a game, as generated by Chadwick from Retrosheet data\';
"""
_game = """
CREATE TABLE IF NOT EXISTS game (
GAME_ID CHAR(12) COMMENT 'Game ID',
GAME_DT DATE COMMENT 'Date',
GAME_CT TINYINT COMMENT 'Game number',
GAME_DY TEXT COMMENT 'Day of week',
START_GAME_TM SMALLINT COMMENT 'Start time',
DH_FL CHAR(1) COMMENT 'DH used flag',
DAYNIGHT_PARK_CD CHAR(1) COMMENT 'Day/night flag',
AWAY_TEAM_ID CHAR(3) COMMENT 'Visiting team',
HOME_TEAM_ID CHAR(3) COMMENT 'Home team',
PARK_ID CHAR(5) COMMENT 'Game site',
AWAY_START_PIT_ID CHAR(8) COMMENT 'Visitors starting pitcher',
HOME_START_PIT_ID CHAR(8) COMMENT 'Home starting pitcher',
BASE4_UMP_ID CHAR(8) COMMENT 'Home plate umpire',
BASE1_UMP_ID CHAR(8) COMMENT 'First base umpire',
BASE2_UMP_ID CHAR(8) COMMENT 'Second base umpire',
BASE3_UMP_ID CHAR(8) COMMENT 'Third base umpire',
LF_UMP_ID CHAR(8) COMMENT 'Left field umpire',
RF_UMP_ID CHAR(8) COMMENT 'Right field umpire',
ATTEND_PARK_CT MEDIUMINT COMMENT 'Attendance',
SCORER_RECORD_ID TEXT COMMENT 'PS scorer',
TRANSLATOR_RECORD_ID TEXT COMMENT 'Translator',
INPUTTER_RECORD_ID TEXT COMMENT 'Inputter',
INPUT_RECORD_TS DATETIME COMMENT 'Input time',
EDIT_RECORD_TS DATETIME COMMENT 'Edit time',
METHOD_RECORD_CD TINYINT COMMENT 'How scored',
PITCHES_RECORD_CD TINYINT COMMENT 'Pitches entered',
TEMP_PARK_CT SMALLINT COMMENT 'Temperature',
WIND_DIRECTION_PARK_CD TINYINT COMMENT 'Wind direction',
WIND_SPEED_PARK_CT TINYINT COMMENT 'Wind speed',
FIELD_PARK_CD TINYINT COMMENT 'Field condition',
PRECIP_PARK_CD TINYINT COMMENT 'Precipitation',
SKY_PARK_CD | |
# FIXME: maplike<> and setlike<> should also imply the presence of a
# 'size' attribute.
# Stringifier
if interface.stringifier:
stringifier = interface.stringifier
stringifier_ext_attrs = stringifier.extended_attributes.copy()
if stringifier.attribute:
implemented_as = stringifier.attribute.name
elif stringifier.operation:
implemented_as = stringifier.operation.name
else:
implemented_as = 'toString'
methods.append(
generated_method(
return_type=IdlType('DOMString'),
name='toString',
extended_attributes=stringifier_ext_attrs,
implemented_as=implemented_as))
for method in methods:
# The value of the Function object’s “length” property is a Number
# determined as follows:
# 1. Let S be the effective overload set for regular operations (if the
# operation is a regular operation) or for static operations (if the
# operation is a static operation) with identifier id on interface I and
# with argument count 0.
# 2. Return the length of the shortest argument list of the entries in S.
# FIXME: This calculation doesn't take into account whether runtime
# enabled overloads are actually enabled, so length may be incorrect.
# E.g., [RuntimeEnabled=Foo] void f(); void f(long x);
# should have length 1 if Foo is not enabled, but length 0 if it is.
method['length'] = (method['overloads']['length']
if 'overloads' in method else
method['number_of_required_arguments'])
return {
'iterator_method': iterator_method,
'iterator_method_alias': iterator_method_alias,
'methods': methods,
}
def reflected_name(constant_name):
"""Returns the name to use for the matching constant name in blink code.
Given an all-uppercase 'CONSTANT_NAME', returns a camel-case
'kConstantName'.
"""
# Names that are not SHOUTY_CASE are returned unchanged
if constant_name.upper() != constant_name:
return constant_name
return 'k' + ''.join(part.title() for part in constant_name.split('_'))
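# Small illustrative examples (assumptions mirroring the docstring, not taken
# from the spec): SHOUTY_CASE names gain a leading 'k' and become camel case,
# while names that are not all-uppercase are returned untouched.
#
#   reflected_name('NETWORK_ERR')   # -> 'kNetworkErr'
#   reflected_name('alreadyCamel')  # -> 'alreadyCamel'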
# [DeprecateAs], [Reflect], [RuntimeEnabled]
def constant_context(constant, interface, component_info):
extended_attributes = constant.extended_attributes
runtime_features = component_info['runtime_enabled_features']
return {
'camel_case_name':
NameStyleConverter(constant.name).to_upper_camel_case(),
'cpp_class':
extended_attributes.get('PartialInterfaceImplementedAs'),
'cpp_type':
constant.idl_type.cpp_type,
'deprecate_as':
v8_utilities.deprecate_as(constant), # [DeprecateAs]
'idl_type':
constant.idl_type.name,
'measure_as':
v8_utilities.measure_as(constant, interface), # [MeasureAs]
'high_entropy':
v8_utilities.high_entropy(constant), # [HighEntropy]
'name':
constant.name,
# [RuntimeEnabled] for origin trial
'origin_trial_feature_name':
v8_utilities.origin_trial_feature_name(constant, runtime_features),
# FIXME: use 'reflected_name' as correct 'name'
'rcs_counter':
'Blink_' + v8_utilities.cpp_name(interface) + '_' + constant.name +
'_ConstantGetter',
'reflected_name':
extended_attributes.get('Reflect', reflected_name(constant.name)),
# [RuntimeEnabled] if not in origin trial
'runtime_enabled_feature_name':
runtime_enabled_feature_name(constant, runtime_features),
'value':
constant.value,
}
################################################################################
# Overloads
################################################################################
def compute_method_overloads_context(interface, methods):
# Regular methods
compute_method_overloads_context_by_type(
interface, [method for method in methods if not method['is_static']])
# Static methods
compute_method_overloads_context_by_type(
interface, [method for method in methods if method['is_static']])
def compute_method_overloads_context_by_type(interface, methods):
"""Computes |method.overload*| template values.
Called separately for static and non-static (regular) methods,
as these are overloaded separately.
Modifies |method| in place for |method| in |methods|.
Doesn't change the |methods| list itself (only the values, i.e. individual
methods), so ok to treat these separately.
"""
# Add overload information only to overloaded methods, so template code can
# easily verify if a function is overloaded
for name, overloads in method_overloads_by_name(methods):
# Resolution function is generated after last overloaded function;
# package necessary information into |method.overloads| for that method.
overloads[-1]['overloads'] = overloads_context(interface, overloads)
overloads[-1]['overloads']['name'] = name
overloads[-1]['overloads']['camel_case_name'] = NameStyleConverter(
name).to_upper_camel_case()
def overloads_context(interface, overloads):
"""Returns |overloads| template values for a single name.
Sets |method.overload_index| in place for |method| in |overloads|
and returns dict of overall overload template values.
"""
assert len(overloads) > 1 # only apply to overloaded names
for index, method in enumerate(overloads, 1):
method['overload_index'] = index
# [RuntimeEnabled]
if any(method.get('origin_trial_feature_name') for method in overloads):
raise Exception(
'[RuntimeEnabled] for origin trial cannot be specified on '
'overloaded methods: %s.%s' % (interface.name,
overloads[0]['name']))
effective_overloads_by_length = effective_overload_set_by_length(overloads)
lengths = [length for length, _ in effective_overloads_by_length]
name = overloads[0].get('name', '<constructor>')
camel_case_name = NameStyleConverter(name).to_upper_camel_case()
runtime_determined_lengths = None
function_length = lengths[0]
runtime_determined_maxargs = None
maxarg = lengths[-1]
# The special case handling below is not needed if all overloads are
# runtime enabled by the same feature.
if not common_value(overloads, 'runtime_enabled_feature_name'):
# Check if all overloads with the shortest acceptable arguments list are
# runtime enabled, in which case we need to have a runtime determined
# Function.length.
shortest_overloads = effective_overloads_by_length[0][1]
if (all(
method.get('runtime_enabled_feature_name')
for method, _, _ in shortest_overloads)):
# Generate a list of (length, runtime_enabled_feature_names) tuples.
runtime_determined_lengths = []
for length, effective_overloads in effective_overloads_by_length:
runtime_enabled_feature_names = set(
method['runtime_enabled_feature_name']
for method, _, _ in effective_overloads)
if None in runtime_enabled_feature_names:
# This "length" is unconditionally enabled, so stop here.
runtime_determined_lengths.append((length, [None]))
break
runtime_determined_lengths.append(
(length, sorted(runtime_enabled_feature_names)))
function_length = ('%s::%sMethodLength()' % (
internal_namespace(interface), camel_case_name))
# Check if all overloads with the longest required arguments list are
# runtime enabled, in which case we need to have a runtime determined
# maximum distinguishing argument index.
longest_overloads = effective_overloads_by_length[-1][1]
if (not common_value(overloads, 'runtime_enabled_feature_name')
and all(
method.get('runtime_enabled_feature_name')
for method, _, _ in longest_overloads)):
# Generate a list of (length, runtime_enabled_feature_name) tuples.
runtime_determined_maxargs = []
for length, effective_overloads in reversed(
effective_overloads_by_length):
runtime_enabled_feature_names = set(
method['runtime_enabled_feature_name']
for method, _, _ in effective_overloads
if method.get('runtime_enabled_feature_name'))
if not runtime_enabled_feature_names:
# This "length" is unconditionally enabled, so stop here.
runtime_determined_maxargs.append((length, [None]))
break
runtime_determined_maxargs.append(
(length, sorted(runtime_enabled_feature_names)))
maxarg = ('%s::%sMethodMaxArg()' % (internal_namespace(interface),
camel_case_name))
# Check and fail if overloads disagree about whether the return type
# is a Promise or not.
promise_overload_count = sum(
1 for method in overloads if method.get('returns_promise'))
if promise_overload_count not in (0, len(overloads)):
raise ValueError(
'Overloads of %s have conflicting Promise/non-Promise types' %
(name))
has_overload_visible = False
has_overload_not_visible = False
for overload in overloads:
if overload.get('visible', True):
# If any overload is visible, we need to generate
# overload_resolution, i.e. has_overload_visible should be True.
has_overload_visible = True
else:
has_overload_not_visible = True
# If some overloads are not visible and others are visible,
# the method is overloaded between core and modules.
has_partial_overloads = has_overload_visible and has_overload_not_visible
return {
'deprecate_all_as':
common_value(overloads, 'deprecate_as'), # [DeprecateAs]
'exposed_test_all':
common_value(overloads, 'exposed_test'), # [Exposed]
'length':
function_length,
'length_tests_methods':
length_tests_methods(effective_overloads_by_length),
# 1. Let maxarg be the length of the longest type list of the
# entries in S.
'maxarg':
maxarg,
'measure_all_as':
common_value(overloads, 'measure_as'), # [MeasureAs]
'returns_promise_all':
promise_overload_count > 0,
'runtime_determined_lengths':
runtime_determined_lengths,
'runtime_determined_maxargs':
runtime_determined_maxargs,
# [RuntimeEnabled]
'runtime_enabled_all':
common_value(overloads, 'runtime_enabled_feature_name'),
# [CrossOriginIsolated]
'cross_origin_isolated_test_all':
common_value(overloads, 'cross_origin_isolated_test'),
# [DirectSocketEnabled]
'direct_socket_enabled_test_all':
common_value(overloads, 'direct_socket_enabled_test'),
# [SecureContext]
'secure_context_test_all':
common_value(overloads, 'secure_context_test'),
'valid_arities': (
lengths
# Only need to report valid arities if there is a gap in the
# sequence of possible lengths, otherwise invalid length means
# "not enough arguments".
if lengths[-1] - lengths[0] != len(lengths) - 1 else None),
'visible':
has_overload_visible,
'has_partial_overloads':
has_partial_overloads,
}
def distinguishing_argument_index(entries):
"""Returns the distinguishing argument index for a sequence of entries.
Entries are elements of the effective overload set with the same number
of arguments (formally, same type list length), each a 3-tuple of the form
(callable, type list, optionality list).
Spec: http://heycam.github.io/webidl/#dfn-distinguishing-argument-index
If there is more than one entry in an effective overload set that has a
given type list length, then for those entries there must be an index i
such that for each pair of entries the types at index i are
distinguishable.
The lowest such index is termed the distinguishing argument index for the
entries of the effective overload set with the given type list length.
"""
# Only applicable “If there is more than one entry”
assert len(entries) > 1
def typename_without_nullable(idl_type):
if idl_type.is_nullable:
return idl_type.inner_type.name
return idl_type.name
type_lists = [
tuple(typename_without_nullable(idl_type) for idl_type in entry[1])
for entry in entries
]
type_list_length = len(type_lists[0])
# Only applicable for entries that “[have] a given type list length”
assert all(len(type_list) == type_list_length for type_list in type_lists)
name = entries[0][0].get('name', 'Constructor') # for error reporting
# The spec defines the distinguishing argument index by conditions it must
# satisfy, but does not give an algorithm.
#
# We compute the distinguishing argument index by first computing the
# minimum index where not all types are the same, and then checking that
# all types in this position are distinguishable (and the optionality lists
# up to this point are identical), since "minimum index where not all types
# are the same" is a *necessary* condition, and more direct to check than
# distinguishability.
types_by_index = (set(types) for types in zip(*type_lists))
try:
# “In addition, for each index j, where j is less than the
# distinguishing argument index for a given type list length, the types
# at index j in all of the entries’ | |
X_train\n }, \n y_train,\n batch_size=BATCH_SIZE,\n epochs=EPOCHS,\n validation_data=(\n {\n FAV: embed_features[FAV]["test"],\n UNFAV: embed_features[UNFAV]["test"],\n "non_embed_inputs": X_test\n },\n y_test\n ),\n callbacks=[tfdocs.modeling.EpochDots(), tensorboard_callback], \n verbose=0,\n sample_weight=sample_weights_train if FAIRNESS_REWEIGHING_ENABLED else None\n))')
metrics_df = pd.DataFrame(train_histories[-1].history) # pick the latest training history
metrics_df.tail(1) # pick the last epoch's metrics
from sklearn.metrics import roc_auc_score, classification_report, precision_score, recall_score, f1_score
import sklearn
from collections import OrderedDict
assert sklearn.__version__.startswith('0.22'), "Please upgrade scikit-learn (https://scikit-learn.org/stable/install.html)"
y_prob = model.predict([embed_features[FAV]["test"], embed_features[UNFAV]["test"], X_test], BATCH_SIZE)
y_true = y_test
y_pred = (y_prob / np.max(y_prob, axis=1).reshape(-1, 1)).astype(int) # convert probabilities to one-hot predictions (max probability -> 1, all others -> 0)
pd.DataFrame(OrderedDict({
"macro_roc_auc_ovo": [roc_auc_score(y_test, y_prob, multi_class="ovo", average="macro")],
"weighted_roc_auc_ovo": roc_auc_score(y_test, y_prob, multi_class="ovo", average="weighted"),
"macro_roc_auc_ovr": roc_auc_score(y_test, y_prob, multi_class="ovr", average="macro"),
"weighted_roc_auc_ovr": roc_auc_score(y_test, y_prob, multi_class="ovr", average="weighted"),
"weighted_precision": precision_score(y_test, y_pred, average="weighted"),
"weighted_recall": recall_score(y_test, y_pred, average="weighted"),
"weighted_f1": f1_score(y_test, y_pred, average="weighted")
}))
print(classification_report(y_true, y_pred))
model.save((logdir/"keras_saved_model").as_posix(), save_format="tf")
PREDICTED_RATING, PREDICTION_CONFIDENCE = "pred_rating", "pred_confidence"
def predict_on_dataset(df:pd.DataFrame, model:tf.keras.Model, es:Dict, inp_cols_of_interest:List[str]):
"""
Make predictions on df using the model and return inp_cols_of_interest
IMPORTANT: the embedding store `es` must be the same one the model was trained with
"""
X = transform_pd_X(df[SELECTED_COLS], SELECTED_INP_COLS)
embed_features = {}
for col in EMBED_COLS:
embed_features[col], _ = transform_embed_col(df[col], es[col]["tokenizer"], es[col]["maxlen"])
predict_proba = model.predict([embed_features[FAV], embed_features[UNFAV], X], batch_size=BATCH_SIZE)
df[PREDICTED_RATING], df[PREDICTION_CONFIDENCE] = np.argmax(predict_proba, axis=1), np.max(predict_proba, axis=1)
return df[inp_cols_of_interest + [RATING, PREDICTED_RATING, PREDICTION_CONFIDENCE]]
PredictionReport = namedtuple("PredictionReport", "probabilities predicted_rating confidence")
# Note: Create a dataframe with all SELECTED_INP_COLS
test_df = pd.DataFrame({
AGE: ["45"],
ZIP_CODE: ["94086"],
FAVE_SPORTS: ["I do not like Sports"]
})
probabilities = model.predict(transform_pd_X(test_df, SELECTED_INP_COLS))
predicted_rating, confidence = np.argmax(probabilities), np.max(probabilities)
PredictionReport(probabilities, predicted_rating, confidence)
RANDOM_SEED = 1589150656 # seed used in a historical run of interest for fairness analysis
with open(f"inp-{RANDOM_SEED}.pickle", "rb") as f:
X_train, X_test, y_train, y_test, embed_features, embedding_store, sample_weights_train = pickle.load(f)
X_train.shape, y_train.shape, embed_features[FAV]["train"].shape, embed_features[UNFAV]["train"].shape
model = tf.keras.models.load_model(f"logs/{RANDOM_SEED}/keras_saved_model")
model.summary()
fairness_df = ad_dataset_pd() #.sample(n=100)
merge_minority_classes(fairness_df, RATING) # modifies original dataframe
fairness_df[RATING] = fairness_df[RATING].astype("float")
fairness_df[RATING] = fairness_df[RATING] - 1
train_fairness_df, val_fairness_df = train_test_split(fairness_df, test_size=TEST_FRAC, random_state=RANDOM_SEED)
train_fairness_df.shape, val_fairness_df.shape
get_ipython().run_cell_magic('time', '', '\nres = predict_on_dataset(val_fairness_df, model, embedding_store, [AD_ID, AGE, GENDER, INCOME, HOME_COUNTRY])')
out_path = f"logs/{RANDOM_SEED}/inference_data.csv"
res.to_csv(out_path, index=False)
print(f"Saved inference data at {out_path}")
# Credits: https://stackoverflow.com/a/50671617/1585523 explains all of these metrics; the code below is adapted from it
def metrics_from_df(df:pd.DataFrame, confidence_threshold=0):
"""Compute per-class confusion-matrix metrics, dropping examples whose prediction confidence is below confidence_threshold"""
df = df[df[PREDICTION_CONFIDENCE] >= confidence_threshold] # drop low-confidence predictions from the calculation
y_true = df[RATING]
y_pred = df[PREDICTED_RATING]
cnf_matrix = confusion_matrix(y_true, y_pred)
FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
TP = np.diag(cnf_matrix)
TN = cnf_matrix.sum() - (FP + FN + TP)
FP = FP.astype(float)
FN = FN.astype(float)
TP = TP.astype(float)
TN = TN.astype(float)
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP/(TP+FN)
# Specificity or true negative rate
TNR = TN/(TN+FP)
# Precision or positive predictive value
PPV = TP/(TP+FP)
# Negative predictive value
NPV = TN/(TN+FN)
# Fall out or false positive rate
FPR = FP/(FP+TN)
# False negative rate
FNR = FN/(TP+FN)
# False discovery rate
FDR = FP/(TP+FP)
# Overall accuracy
ACC = (TP+TN)/(TP+FP+FN+TN)
return {
"TPR": TPR, "TNR": TNR, "PPV": PPV, "NPV": NPV, "FPR": FPR, "FNR": FNR, "FDR": FDR, "ACC": ACC
}
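# Worked sketch of how these per-class rates fall out of the confusion matrix
# (hypothetical numbers, not from this dataset): with two classes and
# cnf_matrix = [[80, 20], [10, 90]], class 1 has TP=90, FN=10, FP=20, TN=80,
# so TPR = 90/100 = 0.9 and FPR = 20/100 = 0.2. Each metric returned above is
# therefore an array with one entry per rating class.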
class GroupFairnessMetrics:
def __init__(self, model_inference_data:pd.DataFrame, protected_feature:str):
"""
Compute fairness metrics between two population groups - privileged and unprivileged.
Depending on your dataset you could, for example, use
"Female" as the privileged group label and
"Male" as the unprivileged group label
when Gender is the protected_feature.
All metrics are computed on model_inference_data, which has RATING and PREDICTED_RATING values for each row.
"""
self._df, self._pf = model_inference_data, protected_feature
self._base_metrics = "fairness_metrics_per_class"
self._pf_metrics_df = self._df.groupby(self._pf).apply(metrics_from_df).to_frame(self._base_metrics)
def fetch_base_metrics(self):
return self._pf_metrics_df
def equal_opportunity_difference(self, pg_lbl:str, upg_lbl:str, rating_class=1):
r"""TPR{unprivileged} - TPR{privileged} ideally should be zero"""
upg_opp = self._pf_metrics_df.loc[upg_lbl][self._base_metrics]["TPR"][rating_class]
pg_opp = self._pf_metrics_df.loc[pg_lbl][self._base_metrics]["TPR"][rating_class]
return upg_opp - pg_opp
def statistical_parity_difference(self):
raise NotImplementedError("TODO")
def average_odds_difference(self, pg_lbl:str, upg_lbl:str, rating_class=1):
"""Average of difference in FPR and TPR for unprivileged and privileged groups"""
tpr_diff = self.equal_opportunity_difference(pg_lbl, upg_lbl, rating_class)
upg_fpr = self._pf_metrics_df.loc[upg_lbl][self._base_metrics]["FPR"][rating_class]
pg_fpr = self._pf_metrics_df.loc[pg_lbl][self._base_metrics]["FPR"][rating_class]
fpr_diff = upg_fpr - pg_fpr
return 0.5 * (fpr_diff + tpr_diff)
def disparate_impact():
raise NotImplementedError("TODO")
def theil_index():
raise NotImplementedError("TODO")
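# Hypothetical illustration of the two implemented metrics (made-up numbers):
# if TPR on the positive rating class is 0.70 for the unprivileged group and
# 0.80 for the privileged group, equal_opportunity_difference = 0.70 - 0.80 = -0.10;
# if the corresponding FPR difference is +0.04, then
# average_odds_difference = 0.5 * (0.04 + (-0.10)) = -0.03.
# Values near zero indicate parity between the groups.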
def plot_for_metric_class(metric_df:pd.DataFrame, metric:str="FPR", rating_class:int=1):
"""Generates plot for metric and given rating_class from metric_df indexed by dimension of interest"""
plot_df = metric_df.apply(lambda m: m["fairness_metrics_per_class"][metric][rating_class], axis=1)
plot_df = plot_df.reset_index().rename({0: metric}, axis=1)
return plot_df
res = pd.read_csv(f"logs/{RANDOM_SEED}/inference_data.csv")
inference_res = res.copy()
res.sample(4).T
gender_fairness_metrics = GroupFairnessMetrics(res, GENDER)
plot_df = plot_for_metric_class(gender_fairness_metrics.fetch_base_metrics())
plot_df
get_ipython().run_line_magic('matplotlib', 'inline')
ax = sns.barplot(x=GENDER, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
gender_fairness_metrics.equal_opportunity_difference("F", "M")
gender_fairness_metrics.average_odds_difference("F", "M")
res = inference_res.copy()
res[AGE] = res[AGE].astype("int")
ax = sns.distplot(res[AGE], kde=False, bins=50)
AGE_BUCKET = AGE + "_bucket"
bucket_boundaries = [0, 20, 40, 100] # refer pandas.cut() for syntax on binning
age_labels = ["young", "middle-age", "old"] # refer pandas.cut() for syntax on labels
res[AGE_BUCKET] = pd.cut(res[AGE], bins=bucket_boundaries, labels=age_labels)
res[[AGE, AGE_BUCKET]].sample(n=5)
ax = sns.countplot(res[AGE_BUCKET])
res[AGE_BUCKET].value_counts()
age_metrics_df = res.groupby(AGE_BUCKET).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
age_metrics_df
plot_df = plot_for_metric_class(age_metrics_df)
plot_df
ax = sns.barplot(x=AGE_BUCKET, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
age_gfm = GroupFairnessMetrics(res, AGE_BUCKET)
print(f'{age_gfm.equal_opportunity_difference("young", "middle-age")}\t{age_gfm.equal_opportunity_difference("young", "old")}')
print(f'{age_gfm.average_odds_difference("young", "middle-age")}\t{age_gfm.average_odds_difference("young", "old")}')
res = inference_res.copy()
ax = sns.countplot(res[INCOME])
res[INCOME].value_counts()
income_metrics_df = res.groupby(INCOME).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
income_metrics_df
plot_df = plot_for_metric_class(income_metrics_df)
plot_df
print("\t".join(plot_df["FPR"].astype("str")))
ax = sns.barplot(x=INCOME, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
income_gfm = GroupFairnessMetrics(res, INCOME)
print(
f"{income_gfm.equal_opportunity_difference(0, 1)}\t{income_gfm.equal_opportunity_difference(0, 3)}"
)
print(
f"{income_gfm.average_odds_difference(0, 1)}\t{income_gfm.average_odds_difference(0, 3)}"
)
res = inference_res.copy()
ax = sns.countplot(res[HOME_COUNTRY])
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
res[HOME_COUNTRY].value_counts()
homecountry_metrics_df = res.groupby(HOME_COUNTRY).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
homecountry_metrics_df
plot_df = plot_for_metric_class(homecountry_metrics_df)
plot_df
ax = sns.barplot(x=HOME_COUNTRY, y="FPR", data=plot_df)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90);
res = inference_res[inference_res[AD_ID].str.startswith("A11_")]
print(f"Of {inference_res.shape[0]} ads, {res.shape[0]} ads fall in this category")
gender_metrics_df = res.groupby(GENDER).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
gender_metrics_df
plot_df = plot_for_metric_class(gender_metrics_df)
plot_df
ax = sns.barplot(x=GENDER, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
gender_fairness_metrics = GroupFairnessMetrics(res, GENDER)
gender_fairness_metrics.equal_opportunity_difference("F", "M")
gender_fairness_metrics.average_odds_difference("F", "M")
ax = sns.countplot(res[INCOME])
res[INCOME].value_counts()
income_metrics_df = res.groupby(INCOME).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
income_metrics_df
plot_df = plot_for_metric_class(income_metrics_df)
plot_df
print("\t".join(plot_df["FPR"].astype("str")))
ax = sns.barplot(x=INCOME, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
income_gfm = GroupFairnessMetrics(res, INCOME)
print(
f"{income_gfm.equal_opportunity_difference(0, 1)}\t{income_gfm.equal_opportunity_difference(0, 3)}"
)
print(
f"{income_gfm.average_odds_difference(0, 1)}\t{income_gfm.average_odds_difference(0, 3)}"
)
res = inference_res[inference_res[AD_ID].str.startswith("A03_")]
print(f"Of {inference_res.shape[0]} ads, {res.shape[0]} ads fall in this category")
gender_metrics_df = res.groupby(GENDER).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
gender_metrics_df
plot_df = plot_for_metric_class(gender_metrics_df)
plot_df
get_ipython().run_line_magic('matplotlib', 'inline')
ax = sns.barplot(x=GENDER, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
gender_fairness_metrics = GroupFairnessMetrics(res, GENDER)
gender_fairness_metrics.equal_opportunity_difference("F", "M")
gender_fairness_metrics.average_odds_difference("F", "M")
res = inference_res[inference_res[AD_ID].str.startswith("A06_")]
print(f"Of {inference_res.shape[0]} ads, {res.shape[0]} ads fall in this category")
gender_metrics_df = res.groupby(GENDER).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
gender_metrics_df
plot_df = plot_for_metric_class(gender_metrics_df)
plot_df
ax = sns.barplot(x=GENDER, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
gender_fairness_metrics = GroupFairnessMetrics(res, GENDER)
gender_fairness_metrics.equal_opportunity_difference("F", "M")
gender_fairness_metrics.average_odds_difference("F", "M")
res[AGE] = res[AGE].astype("int")
ax = sns.distplot(res[AGE], kde=False, bins=50)
AGE_BUCKET = AGE + "_bucket"
bucket_boundaries = [0, 20, 40, 100] # refer pandas.cut() for syntax on binning
age_labels = ["young", "middle-age", "old"] # refer pandas.cut() for syntax on labels
res[AGE_BUCKET] = pd.cut(res[AGE], bins=bucket_boundaries, labels=age_labels)
res[[AGE, AGE_BUCKET]].sample(n=5)
ax = sns.countplot(res[AGE_BUCKET])
res[AGE_BUCKET].value_counts()
age_metrics_df = res.groupby(AGE_BUCKET).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
age_metrics_df
plot_df = plot_for_metric_class(age_metrics_df)
plot_df
print("\t".join(plot_df["FPR"].astype("str")))
ax = sns.barplot(x=AGE_BUCKET, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
age_gfm = GroupFairnessMetrics(res, AGE_BUCKET)
print(f'{age_gfm.equal_opportunity_difference("young", "middle-age")}\t{age_gfm.equal_opportunity_difference("young", "old")}')
print(f'{age_gfm.average_odds_difference("young", "middle-age")}\t{age_gfm.average_odds_difference("young", "old")}')
ax = sns.countplot(res[INCOME])
res[INCOME].value_counts()
income_metrics_df = res.groupby(INCOME).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
income_metrics_df
plot_df = plot_for_metric_class(income_metrics_df)
plot_df
print("\t".join(plot_df["FPR"].astype("str")))
ax = sns.barplot(x=INCOME, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
income_gfm = GroupFairnessMetrics(res, INCOME)
print(
f"{income_gfm.equal_opportunity_difference(0, 1)}\t{income_gfm.equal_opportunity_difference(0, 3)}"
)
print(
f"{income_gfm.average_odds_difference(0, 1)}\t{income_gfm.average_odds_difference(0, 3)}"
)
res = inference_res[inference_res[AD_ID].str.startswith("A18_")]
print(f"Of {inference_res.shape[0]} ads, {res.shape[0]} ads fall in this category")
gender_metrics_df = res.groupby(GENDER).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
gender_metrics_df
plot_df = plot_for_metric_class(gender_metrics_df)
plot_df
ax = sns.barplot(x=GENDER, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
gender_fairness_metrics = GroupFairnessMetrics(res, GENDER)
gender_fairness_metrics.equal_opportunity_difference("F", "M")
gender_fairness_metrics.average_odds_difference("F", "M")
res[AGE] = res[AGE].astype("int")
ax = sns.distplot(res[AGE], kde=False, bins=50)
AGE_BUCKET = AGE + "_bucket"
bucket_boundaries = [0, 20, 40, 100] # refer pandas.cut() for syntax on binning
age_labels = ["young", "middle-age", "old"] # refer pandas.cut() for syntax on labels
res[AGE_BUCKET] = pd.cut(res[AGE], bins=bucket_boundaries, labels=age_labels)
res[[AGE, AGE_BUCKET]].sample(n=5)
ax = sns.countplot(res[AGE_BUCKET])
res[AGE_BUCKET].value_counts()
age_metrics_df = res.groupby(AGE_BUCKET).apply(metrics_from_df).to_frame("fairness_metrics_per_class")
age_metrics_df
plot_df = plot_for_metric_class(age_metrics_df)
plot_df
print("\t".join(plot_df["FPR"].astype("str")))
ax = sns.barplot(x=AGE_BUCKET, y="FPR", data=plot_df)
plot_df.to_clipboard(False)
age_gfm = GroupFairnessMetrics(res, AGE_BUCKET)
print(f'{age_gfm.equal_opportunity_difference("young", "middle-age")}\t{age_gfm.equal_opportunity_difference("young", "old")}')
print(f'{age_gfm.average_odds_difference("young", "middle-age")}\t{age_gfm.average_odds_difference("young", "old")}')
EXAMPLE_BATCH = next(iter(input_fn_train(3)))[0]
EXAMPLE_BATCH
def test_feature_column(feature_column):
feature_layer = tf.keras.layers.DenseFeatures(feature_column)
return feature_layer(EXAMPLE_BATCH).numpy()
age_fc = tf.feature_column.numeric_column(AGE, normalizer_fn=lambda x: (x - MEAN_AGE) / STD_AGE)
zip_fcs = [
tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(
f"{ZIP_CODE}{i}", vocabulary_list=list(string.digits),
num_oov_buckets=1)
)
for i in range(FIRST_K_ZIP_DIGITS)
]
EXAMPLE_BATCH[AGE], test_feature_column(age_fc)
{k: v for k, v in EXAMPLE_BATCH.items() if k.startswith(ZIP_CODE)}, test_feature_column(zip_fcs)
tf.keras.layers.concatenate(age_fc, zip_fcs[0])
import os
import tempfile
import apache_beam as beam
import numpy as np
import pandas as pd
from datetime import datetime
import tensorflow_hub as hub
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_data_validation as tfdv
from tensorflow_model_analysis.addons.fairness.post_export_metrics import fairness_indicators
from tensorflow_model_analysis.addons.fairness.view import widget_view
from fairness_indicators.examples import util
from witwidget.notebook.visualization import WitConfigBuilder
from witwidget.notebook.visualization import WitWidget
logdir
y_train[:100, :].shape
from collections import OrderedDict
def dl():
for i in range(100):
yield OrderedDict({
FAV: embed_features[FAV]["train"][i, :],
UNFAV: embed_features[UNFAV]["train"][i, :],
"non_embed_inputs": X_train[i, :],
RATING: y_train[i, :]
})
dataset = tf.data.Dataset.from_generator(dl,
(tf.float32, tf.float32, tf.float32, tf.float32),
(tf.TensorShape([40]), tf.TensorShape([34]), tf.TensorShape([308]), tf.TensorShape([2])))
NON_EMBED_INPUTS = "non_embed_inputs"
dataset = tf.data.Dataset.from_generator(dl,
{FAV: tf.float32, UNFAV: tf.float32, NON_EMBED_INPUTS: tf.float32, RATING: tf.float32},
{FAV: tf.TensorShape([40]), UNFAV: tf.TensorShape([34]), NON_EMBED_INPUTS: tf.TensorShape([308]), RATING: tf.TensorShape([2])})
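# Quick sanity check (illustrative; the shapes follow the generator spec above):
# batching the dictionary-structured dataset yields one tensor per key.
#
#   for batch in dataset.batch(8).take(1):
#       print(batch[FAV].shape, batch[RATING].shape)  # (8, 40) (8, 2)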
fields_to_save = [FAV, UNFAV, NON_EMBED_INPUTS]
from typing import Dict, List  # typing names used in the annotations below
def serialize_example(*example: List[tf.Tensor]):
"""
Creates a tf.Example message ready to be written to a file.
"""
# Create a dictionary mapping the feature name to the tf.Example-compatible data type.
fields = {field: dtype_feature_map[example[i].dtype](example[i]) for i, field in enumerate(fields_to_save)}
# Create a Features message using tf.train.Example.
example_proto = tf.train.Example(features=tf.train.Features(feature=fields))
return example_proto.SerializeToString()
def tf_serialize_example(example:Dict):
tf_string = tf.py_function(
serialize_example,
[example[field] for field in fields_to_save], # pass these args to the above function.
tf.string) # the return type is `tf.string`.
return tf.reshape(tf_string, ()) # The result is a scalar
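# A sketch (assuming the helpers below, including dtype_feature_map, have already been defined;
# the file name is a placeholder) of how the serialized examples could be written out and read
# back as TFRecords:
#
#     serialized_ds = dataset.map(tf_serialize_example)
#     tf.data.experimental.TFRecordWriter("ratings_train.tfrecord").write(serialized_ds)
#     raw_ds = tf.data.TFRecordDataset("ratings_train.tfrecord")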
# The following functions can be used to convert a value to a type compatible
# with tf.Example.
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
if len(value.shape) > 0:
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
if len(value.shape) > 0:
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
dtype_feature_map = {
tf.dtypes.string: _bytes_feature,
    tf.dtypes.float32: _float_feature,
    tf.dtypes.int64: _int64_feature,
}
or \
last_batch is not None:
raise ValueError("batch_size, shuffle, sampler and last_batch must " \
"not be specified if batch_sampler is specified.")
self._batch_sampler = batch_sampler
self._num_workers = num_workers if num_workers >= 0 else 0
if batchify_fn is None:
if num_workers > 0:
self._batchify_fn = _batchify.Stack(use_shared_mem=True)
else:
self._batchify_fn = _batchify.Stack()
else:
self._batchify_fn = batchify_fn
def __iter__(self):
if self._num_workers == 0:
def same_process_iter():
for batch in self._batch_sampler:
ret = self._batchify_fn([self._dataset[idx] for idx in batch])
if self._pin_memory:
ret = _as_in_context(ret, context.cpu_pinned(self._pin_device_id))
yield ret
return same_process_iter()
# multi-worker
return _MultiWorkerIterV1(self._num_workers, self._dataset,
self._batchify_fn, self._batch_sampler,
self._pin_memory, self._pin_device_id)
def __len__(self):
return len(self._batch_sampler)
def _thread_worker_initializer(active_shape, active_array):
"""Initializer for ThreadPool."""
set_np(shape=active_shape, array=active_array)
_worker_dataset = None
def _worker_initializer(dataset, active_shape, active_array):
"""Initialier for processing pool."""
# global dataset is per-process based and only available in worker processes
# this is only necessary to handle MXIndexedRecordIO because otherwise dataset
# can be passed as argument
global _worker_dataset
_worker_dataset = dataset
set_np(shape=active_shape, array=active_array)
def _worker_fn(samples, batchify_fn, dataset=None):
"""Function for processing data in worker process."""
# pylint: disable=unused-argument
# it is required that each worker process has to fork a new MXIndexedRecordIO handle
# preserving dataset as global variable can save tons of overhead and is safe in new process
global _worker_dataset
batch = batchify_fn([_worker_dataset[i] for i in samples])
buf = io.BytesIO()
ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(batch)
return buf.getvalue()
def _thread_worker_fn(samples, batchify_fn, dataset):
"""Threadpool worker function for processing data."""
return batchify_fn([dataset[i] for i in samples])
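# Note the asymmetry between the two worker functions above: process-pool workers (_worker_fn)
# read the dataset through the per-process global set by _worker_initializer and pickle the
# resulting batch through a BytesIO buffer with ForkingPickler, while thread-pool workers
# (_thread_worker_fn) share the parent's address space and simply return the batch object.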
class _MultiWorkerIter(object):
"""Internal multi-worker iterator for DataLoader."""
def __init__(self, worker_pool, batchify_fn, batch_sampler, pin_memory=False,
pin_device_id=0, worker_fn=_worker_fn, prefetch=0, dataset=None,
data_loader=None, timeout=120):
self._worker_pool = worker_pool
self._batchify_fn = batchify_fn
self._batch_sampler = batch_sampler
self._data_buffer = {}
self._rcvd_idx = 0
self._sent_idx = 0
self._iter = iter(self._batch_sampler)
self._worker_fn = worker_fn
self._pin_memory = pin_memory
self._pin_device_id = pin_device_id
self._dataset = dataset
self._data_loader = data_loader
self._timeout = timeout
# pre-fetch
for _ in range(prefetch):
self._push_next()
def __len__(self):
return len(self._batch_sampler)
def _push_next(self):
"""Assign next batch workload to workers."""
r = next(self._iter, None)
if r is None:
return
async_ret = self._worker_pool.apply_async(
self._worker_fn, (r, self._batchify_fn, self._dataset))
self._data_buffer[self._sent_idx] = async_ret
self._sent_idx += 1
def __next__(self):
self._push_next()
if self._rcvd_idx == self._sent_idx:
assert not self._data_buffer, "Data buffer should be empty at this moment"
raise StopIteration
assert self._rcvd_idx < self._sent_idx, "rcvd_idx must be smaller than sent_idx"
assert self._rcvd_idx in self._data_buffer, "fatal error with _push_next, rcvd_idx missing"
ret = self._data_buffer.pop(self._rcvd_idx)
try:
if self._dataset is None:
batch = pickle.loads(ret.get(self._timeout))
else:
batch = ret.get(self._timeout)
if self._pin_memory:
batch = _as_in_context(batch, context.cpu_pinned(self._pin_device_id))
self._rcvd_idx += 1
return batch
except multiprocessing.context.TimeoutError:
msg = '''Worker timed out after {} seconds. This might be caused by \n
- Slow transform. Please increase timeout to allow slower data loading in each worker.
'''.format(self._timeout)
if not isinstance(self._worker_pool, multiprocessing.pool.ThreadPool):
msg += '''- Insufficient shared_memory if `timeout` is large enough.
            Please consider reducing `num_workers` or increasing shared_memory in the system.
'''
print(msg)
raise
except Exception:
self._worker_pool.terminate()
raise
def next(self):
return self.__next__()
def __iter__(self):
return self
class DataLoader(object):
"""Loads data from a dataset and returns mini-batches of data.
Parameters
----------
dataset : Dataset
Source dataset. Note that numpy and mxnet arrays can be directly used
as a Dataset.
batch_size : int
Size of mini-batch.
shuffle : bool
Whether to shuffle the samples.
sampler : Sampler
The sampler to use. Either specify sampler or shuffle, not both.
last_batch : {'keep', 'discard', 'rollover'}
How to handle the last batch if batch_size does not evenly divide
``len(dataset)``.
        keep - A batch with fewer samples than previous batches is returned.
        discard - The last batch is discarded if it is incomplete.
rollover - The remaining samples are rolled over to the next epoch.
batch_sampler : Sampler
A sampler that returns mini-batches. Do not specify batch_size,
shuffle, sampler, and last_batch if batch_sampler is specified.
batchify_fn : callable
Callback function to allow users to specify how to merge samples
into a batch. Defaults to `gluon.data.batchify.Stack()`.
.. code-block:: python
def default_batchify_fn(data):
if isinstance(data[0], nd.NDArray):
return nd.stack(*data)
elif isinstance(data[0], np.ndarray):
return np.stack(*data)
elif isinstance(data[0], tuple):
data = zip(*data)
return [default_batchify_fn(i) for i in data]
else:
data = np.asarray(data)
                return np.array(data, dtype=data.dtype)
num_workers : int, default 0
The number of multiprocessing workers to use for data preprocessing.
pin_memory : boolean, default False
If ``True``, the dataloader will copy NDArrays into pinned memory
before returning them. Copying from CPU pinned memory to GPU is faster
than from normal CPU memory.
pin_device_id : int, default 0
The device id to use for allocating pinned memory if pin_memory is ``True``
prefetch : int, default is `num_workers * 2`
        The number of prefetched batches; only takes effect if `num_workers` > 0.
        If `prefetch` > 0, worker processes are allowed to prefetch that many batches before
        data is acquired from the iterator.
        Note that a larger prefetch gives smoother bootstrapping performance,
        but consumes more shared_memory. Using a smaller number may forfeit the purpose of using
        multiple worker processes; try reducing `num_workers` in this case.
        It defaults to `num_workers * 2`.
thread_pool : bool, default False
        If ``True``, use a thread pool instead of a multiprocessing pool. Using a thread pool
        avoids shared memory usage. If the `DataLoader` is more IO bound or the GIL is not the
        bottleneck, the threadpool version may achieve better performance than multiprocessing.
timeout : int, default is 120
        The timeout in seconds for each worker to fetch a batch of data. Only modify this number
        if you are experiencing a timeout and you know it's due to slow data loading.
        Sometimes a full `shared_memory` will cause all workers to hang and trigger the timeout. In these
        cases please reduce `num_workers` or increase the system `shared_memory` size instead.
try_nopython : bool or None, default is None
        Try to compile the python data loading pipeline into a pure MXNet C++ implementation. The benefit is
        potentially faster iteration, no `shared_memory` usage, and fewer processes managed by python.
        The compilation is not guaranteed to support all use cases, but it will fall back to python in
        case of failure. You can set `try_nopython` to `False` to disable auto-detection of the
        compilation feature, or leave it as `None` to let MXNet determine it automatically.
        If you set `try_nopython` to `True` and the compilation fails, it will raise a
RuntimeError with the failure reason.
"""
def __init__(self, dataset, batch_size=None, shuffle=False, sampler=None,
last_batch=None, batch_sampler=None, batchify_fn=None,
num_workers=0, pin_memory=False, pin_device_id=0,
prefetch=None, thread_pool=False, timeout=120, try_nopython=None):
self._dataset = dataset
self._pin_memory = pin_memory
self._pin_device_id = pin_device_id
self._thread_pool = thread_pool
self._timeout = timeout
self._mx_iter = None
assert timeout > 0, "timeout must be positive, given {}".format(timeout)
if batch_sampler is None:
if batch_size is None:
raise ValueError("batch_size must be specified unless " \
"batch_sampler is specified")
if sampler is None:
if shuffle:
sampler = _sampler.RandomSampler(len(dataset))
else:
sampler = _sampler.SequentialSampler(len(dataset))
elif shuffle:
raise ValueError("shuffle must not be specified if sampler is specified")
batch_sampler = _sampler.BatchSampler(
sampler, batch_size, last_batch if last_batch else 'keep')
elif batch_size is not None or shuffle or sampler is not None or \
last_batch is not None:
raise ValueError("batch_size, shuffle, sampler and last_batch must " \
"not be specified if batch_sampler is specified.")
self._batch_sampler = batch_sampler
self._num_workers = num_workers if num_workers >= 0 else 0
self._worker_pool = None
self._prefetch = max(0, int(prefetch) if prefetch is not None else 2 * self._num_workers)
if batchify_fn is None:
if num_workers > 0:
self._batchify_fn = _batchify.Stack(use_shared_mem=True)
else:
self._batchify_fn = _batchify.Stack()
else:
self._batchify_fn = batchify_fn
if num_workers > 0 and (try_nopython or try_nopython is None):
# check for capability to use mx backend threadedLoader
use_mx_iter, mx_iter_args = _check_mx_loader_capability(
self._dataset, self._batch_sampler, self._batchify_fn)
if not use_mx_iter:
if try_nopython:
raise RuntimeError(mx_iter_args)
else:
use_mx_iter = False
if use_mx_iter:
logging.info("Using MXNet backend ThreadedDataLoader with %s workers "
"instead of python dataloader.", self._num_workers)
self._mx_iter = _MXThreadedDataLoader(
num_workers=self._num_workers,
pin_memory=self._pin_memory,
pin_device_id=self._pin_device_id,
prefetch=self._prefetch, **mx_iter_args)
else:
nd.waitall()
import gc
gc.collect()
nd.waitall()
if self._num_workers > 0:
if self._thread_pool:
self._worker_pool = ThreadPool(self._num_workers,
initializer=_thread_worker_initializer,
initargs=(is_np_shape(), is_np_array()))
else:
                # ignore the keyboard interrupt signal before forking processes
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
self._worker_pool = multiprocessing.Pool(
self._num_workers, initializer=_worker_initializer,
initargs=[self._dataset, is_np_shape(), is_np_array()])
                # restore the keyboard interrupt signal handler in the main process
signal.signal(signal.SIGINT, original_sigint_handler)
def __iter__(self):
if self._mx_iter is not None:
from pprint import pprint
import enum
from urllib.parse import urlparse, parse_qs
import sys
import pdsupport
# creating enumerations using class
class States(enum.Enum):
old = 0
unchanged = 1
changed = 2
new = 3
class PyvoSheet:
def __init__(self, name, table, style, template):
self.name = name
self.table = table
self.style = style
self.template = template
def __repr__(self):
        return "PyvoSheet(name={!r})".format(self.name)  # __repr__ must return a string
    def __str__(self):
        return 'PyvoSheet(name=' + self.name + ')'
class PyvotabElement(dict):
def __init__(self, value, singletab, parent, isNew, source_style, debug ):
self.parent = parent
self.dimension = 0
self.source_style = source_style
self.value = value
self.singletab = singletab
self.debug = debug
        # change_state: States.old / unchanged / changed / new (see the States enum above)
if not isNew:
self.change_state = States.old
else:
self.change_state = States.new
def increaseDimension(self):
self.dimension += 1
if self.parent:
self.parent.increaseDimension()
def set_change_state(self, child_state):
''' recalculates the change_state
'''
if self.change_state == States.changed: # there's no more left to set
return
if self.change_state==States.old and child_state==States.unchanged:
self.change_state=States.unchanged
if self.change_state != child_state:
self.change_state = States.changed
if self.parent:
self.parent.set_change_state(self.change_state)
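    # In effect, set_change_state folds child states upwards: an `old` parent seeing an
    # `unchanged` child is promoted to `unchanged`, any remaining disagreement between parent
    # and child collapses to `changed`, and a `changed` node never changes again. The result
    # is then propagated recursively up to the root.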
def add(self, value, isNew, source_style):
        ''' adds a child node for the given value and records whether it is seen as old or new
Parameters
----------
value : scalar
value to set
isNew : bool
            flag whether this value is seen as old or new
source_style
            identifier of the data source. Not used by the program, but maintained as a reference
to tell the user where the resulting table cells are coming from
'''
self.source_style = source_style
self.increaseDimension()
if not value in self:
self[value] = PyvotabElement(value,self.singletab,self, isNew, source_style, self.debug)
return self[value]
def MakeValue(self, value, rowDt, colDt, isNew):
        ''' sets the value and calculates the old/new/unchanged state
Parameters
----------
value : scalar
            value to set; use 'None' to mark an initial endpoint
rowDt : Pyvotab
reference to the row Pyvotab table
colDt : Pyvotab
reference to the col Pyvotab table
isNew : bool
            flag whether this value is seen as old or new
'''
self.rowDt = rowDt
self.colDt = colDt
if self.value:
if self.change_state==States.old and isNew:
self.change_state=States.unchanged
if self.value != value:
self.change_state = States.changed
rowDt.set_change_state(self.change_state)
colDt.set_change_state(self.change_state)
self.value = value
def getEndPoint(self, myhash):
        ''' returns the value cell object if it is an endpoint, otherwise None
Parameters
----------
myhash : string
hash representing the aligned row/col endpoint
'''
try:
return self[hash(myhash)]
except:
return None
def setEndPoint(self, newEndPoint, myhash):
'''
        defines a node as an endpoint, i.e. the bottom-/rightmost object in the row/column header.
Stores the hash of the aligned row/col endpoint
Parameters
----------
newEndPoint : object
value cell object, containing all data about a value cell
myhash : string
hash representing the aligned row/col endpoint
'''
self[hash(myhash)] = newEndPoint
self.isEndStup = True
def setPrintCoords(self, startCoord, level, xDirection):
'''
set the final coordinate of a node
Parameters
----------
startCoord : int
top/leftmost coordinate of that node, given by the parent nodes
level : int
entry level of that node
xDirection : bool
            tells if the object is located in the column header (True) or row header (False)
'''
if xDirection:
self.printY = startCoord
else:
self.printX = startCoord
def calPrintCoords(self, startCoord, level, xDirection):
'''
calculates the final coordinate of a node
Parameters
----------
startCoord : int
top/leftmost coordinate of that node, given by the parent nodes
level : int
entry level of that node
xDirection : bool
            tells if the object is located in the column header (True) or row header (False)
'''
self.startCoord = startCoord
self.blockSize = startCoord
self.level = level
isEnd = False
try:
self.isEndStup
isEnd = True
        except: # this is not an endstup
            startCoord -= 1 # reduce it, so that the return value is the same as the start in case this element and its subs cover only 1 row/column
for index in sorted(self.keys()):
if isEnd:
self[index].setPrintCoords(startCoord, level, xDirection)
else:
startCoord += 1
startCoord = self[index].calPrintCoords(
startCoord, level+1, xDirection)
self.blockSize = startCoord-self.blockSize+1
return startCoord
def depth(self, actLevel):
'''
calculates the child node depth of a node
Parameters
----------
actLevel : int
entry level of that node
'''
resultLevel = actLevel
for index in self:
try:
self[index].isEndStup
return actLevel
except:
newLevel = self[index].depth(actLevel+1)
if newLevel > resultLevel:
resultLevel = newLevel
return resultLevel
def fillPrintGridValue(self, multiplier, xDirection, fillFunction):
'''
calculates output table content and dimensions for a single cell
Parameters
----------
multiplier : int
            tells over how many cells the object extends (based on the number of child objects)
        xDirection : bool
            tells if the object is located in the column header (True) or row header (False)
fillFunction : function
function which copies the content into the custom table
'''
if self.debug:
value = "'{0}' ({1})".format(
self.value, self.change_state)
else:
value = self.value
this_style=self.source_style
if self.change_state == States.old:
this_style = self.singletab.old_style
if self.change_state == States.changed:
this_style = self.singletab.change_style
if self.change_state == States.new:
this_style = self.singletab.new_style
# to have some space for the heading names, we move the value cells 1 row downwards (self.printY + 1)
fillFunction(value, self.printX , self.printY+1,
xDirection, 1, this_style)
def fillPrintGrid(self, multiplier, xDirection, fillFunction):
'''
calculates output table content and dimensions
Parameters
----------
multiplier : int
            tells over how many cells the object extends (based on the number of child objects)
        xDirection : bool
            tells if the object is located in the column header (True) or row header (False)
fillFunction : function
function which copies the content into the custom table
'''
for index in sorted(self.keys()):
isEnd = False
try:
self.isEndStup
isEnd = True
except:
pass
if isEnd:
self[index].fillPrintGridValue(
multiplier, xDirection, fillFunction)
else:
                # determine the correct style based on whether the element is old or not
this_style = self[index].source_style
if self[index].change_state == States.old:
this_style = self.singletab.old_style
if self[index].change_state == States.changed:
this_style = self.singletab.change_style
if self[index].change_state == States.new:
this_style = self.singletab.new_style
if self.debug:
value = "'{0}' ({1}) {2}".format(index, self[index].change_state,self[index].blockSize)
else:
value = index
if xDirection:
fillFunction(
# to have some space for the heading names, we move the value cells 1 row downwards (self[index].startCoord +1)
value, self.level, self[index].startCoord +1, xDirection, self[index].blockSize, this_style)
# in case we have a multiple cell span, we need to set some empty filler
for i in range(1,self[index].blockSize):
fillFunction(
None, self.level, self[index].startCoord+i+1, xDirection, self[index].blockSize, this_style)
else:
fillFunction(
value, self[index].startCoord , self.level , xDirection, self[index].blockSize, this_style)
# in case we have a multiple cell span, we need to set some empty filler
for i in range(1,self[index].blockSize):
fillFunction(
None, self[index].startCoord + 1 +i , self.level, xDirection, self[index].blockSize, this_style)
self[index].fillPrintGrid(
multiplier, xDirection, fillFunction)
def pprint(self):
''' debug print
'''
if self:
for value in self.values():
print("{{'{0}'".format(hex(id(value))), end='')
if value.change_state == States.old:
print("-", end="")
if value.change_state == States.new:
print("+", end="")
print(": ", end='')
value.pprint()
print('}} ', end='')
else:
print("<{0}> {1}".format(self.value, self.change_state), end='')
class SingleTab:
def __init__( self, headers, old_style, new_style, change_style,row_header_style, col_header_style, debug):
'''
Creates a SingleTab object, representing a single output table
Parameters
----------
        headers: dict
contains the rows and cols header descriptions
old_style,
new_style,
change_style,
row_header_style,
col_header_style : Object
            The style objects define how the cells should be formatted in the final table. These objects are not used or modified
            by pyvotab at all; they are only passed through into the result table to allow the user to define the wanted formats
'''
self.rowTd = PyvotabElement('row',self,None, False, -1, debug)
self.colTd = PyvotabElement('col',self,None, False, -1, debug)
self.old_style = old_style
self.new_style = new_style
self.row_header_style = row_header_style
self.col_header_style = col_header_style
self.change_style = change_style
self.headers = headers
self.debug = debug
def get_sheet_style(self):
first_element = next(iter( self.rowTd.values() ))
initial_change_state=first_element.change_state
default_style=first_element.source_style
for element in self.rowTd.values():
if initial_change_state != element.change_state:
initial_change_state=States.changed
break
if initial_change_state!=States.changed:
for element in self.colTd.values():
if initial_change_state != element.change_state:
initial_change_state=States.changed
break
if initial_change_state == States.old:
return self.old_style
if initial_change_state == States.changed:
return self.change_style
if initial_change_state == States.new:
return self.new_style
return default_style
def headerrows(self):
'''returns the number of cells on top of the resulting table, before the data cells starts
'''
return self.rowTd.depth(1)
def headercols(self):
'''returns the number of cells on the left side of the resulting table, before the data cells starts
'''
return self.colTd.depth(1)
def layoutGrid(self):
''' starts the process to transform the data tree into their table position and sizes
'''
self.rowTd.calPrintCoords(self.colTd.depth(1), 0, True)
self.colTd.calPrintCoords(self.rowTd.depth(1), 0, False)
def getPrintDict(self):
''' translates the internal data tree structure with it's calculated x/y positions (by layoutgrid()) into its x/y table representation for printout
'''
count_of_header_rows = self.headerrows()
count_of_header_cols = self.headercols()
self.ptdict = ptPrintDict()
for index in range(len(self.headers['cols'])):
self.printfunction(self.headers['cols'][index], count_of_header_rows-1 , index , False, 1, self.col_header_style)
for index in range(len(self.headers['rows'])):
self.printfunction(self.headers['rows'][index], index , count_of_header_cols , False, 1, self.row_header_style)
self.rowTd.fillPrintGrid(1, True, self.printfunction)
self.colTd.fillPrintGrid(1, False, self.printfunction)
def printfunction(self, value, px, py, xDirection, blocksize, style):
'''
storage function to fill the result ptPrintDict- Table by the x/y coords, the value, the cell direction, the row/col span and the wanted style
'''
try:
self.ptdict[px]
except:
self.ptdict[px] = {}
if value:
self.ptdict[px][py] = {
"value": value, "style": style, "size": blocksize, "xDir": xDirection}
else:
self.ptdict[px][py] = None
if self.ptdict.xSize < px + 1:
self.ptdict.xSize = px +1
if self.ptdict.ySize < py + 1:
self.ptdict.ySize = py +1
class PyvoStyles:
def __init__(self, old_style, new_style, change_style, row_header_style, col_header_style):
'''
Contains a pre- defined set of styles
Parameters
----------
old_style,
new_style,
change_style,
row_header_style,
col_header_style : Object
            The style objects define how the cells should be formatted in the final table. These objects are not used or modified
            by pyvotab at all; they are only passed through into the result table to allow the user to define the wanted formats
'''
self.old_style = old_style
self.new_style = new_style
self.change_style = change_style
self.row_header_style = row_header_style
self.col_header_style = col_header_style
class Pyvotab:
def __init__(self, pyvo_styles, layout, debug= False):
'''
Creates a Pyvotab object
Parameters
----------
pyvo_styles: PyvoStyles
            The styles object defines how the cells should be formatted in the final table. These objects are not used or modified
            by pyvotab at all; they are only passed through into the result table to allow the user to define the wanted formats
page: int or string
            If int, it defines the column which should be used as the page. If string, it's handled as a single page, referred to by the page name
layout : dict or string
contains the layout parameters, either in an url coded string or an dict. The parameter are
page: int or string
                If int, it defines the column which should be used as the page. If string, it's handled as a single page, referred to by the page name
source: string
                name of the Excel sheet which should be used as the data table source, can
# Copyright (c) 2018 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import fixtures
import tempfile
import webob.exc
from tooz import coordination
from unittest import mock
from network_runner import api
from network_runner.types import validators
from neutron.common import test_lib
from neutron.objects import network
from neutron.objects import ports
from neutron.objects import trunk
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron import quota
from neutron.tests.unit.plugins.ml2 import test_plugin
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net
from neutron_lib.callbacks import resources
from networking_ansible import constants as c
from networking_ansible import exceptions as netans_ml2exc
from networking_ansible.tests.unit import base
class TestLibTestConfigFixture(fixtures.Fixture):
def __init__(self):
self._original_test_config = None
def _setUp(self):
self.addCleanup(self._restore)
self._original_test_config = test_lib.test_config.copy()
def _restore(self):
if self._original_test_config is not None:
test_lib.test_config = self._original_test_config
class NetAnsibleML2Base(test_plugin.Ml2PluginV2TestCase):
def setUp(self):
base.patch_neutron_quotas()
with mock.patch.object(validators.ChoiceValidator, '__call__',
return_value=None):
with mock.patch(c.COORDINATION) as m_coord:
m_coord.get_coordinator = lambda *args: mock.create_autospec(
coordination.CoordinationDriver).return_value
super(NetAnsibleML2Base, self).setUp()
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._is_port_supported')
@mock.patch('networking_ansible.ml2.mech_driver.provisioning_blocks',
autospec=True)
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver.ensure_port')
class TestBindPort(base.NetworkingAnsibleTestCase):
def test_bind_port_not_supported(self,
mock_ensure_port,
mock_prov_blocks,
mock_port_supported):
mock_port_supported.return_value = False
self.mech.bind_port(self.mock_port_context)
mock_ensure_port.assert_not_called()
def test_bind_port_supported(self,
mock_ensure_port,
mock_prov_blocks,
mock_port_supported):
mock_port_supported.return_value = True
self.mech.bind_port(self.mock_port_context)
mock_ensure_port.assert_called_once_with(
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid)
class TestIsPortSupported(base.NetworkingAnsibleTestCase):
def test_is_port_supported_baremetal(self):
self.assertTrue(
self.mech._is_port_supported(self.mock_port_bm))
def test_is_port_supported_normal(self):
self.assertTrue(
self.mech._is_port_supported(self.mock_port_vm))
def test_is_port_supported_direct(self):
self.assertTrue(
self.mech._is_port_supported(self.mock_port_dt))
def test_is_port_supported_invalid(self):
self.mock_port_bm.dict[c.DEVICE_OWNER] = 'invalid'
self.assertFalse(
self.mech._is_port_supported(self.mock_port_bm))
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._is_port_supported')
class TestIsPortBound(base.NetworkingAnsibleTestCase):
def test_is_port_bound(self, mock_port_supported):
mock_port_supported.return_value = True
self.assertTrue(
self.mech._is_port_bound(self.mock_port_bm))
def test_is_port_bound_normal(self, mock_port_supported):
mock_port_supported.return_value = True
self.assertTrue(
self.mech._is_port_bound(self.mock_port_vm))
def test_is_port_bound_direct(self, mock_port_supported):
mock_port_supported.return_value = True
self.assertTrue(
self.mech._is_port_bound(self.mock_port_dt))
def test_is_port_bound_not_other(self, mock_port_supported):
self.mock_port_bm.dict['binding:vif_type'] = 'not-other'
self.assertFalse(
self.mech._is_port_bound(self.mock_port_bm))
def test_is_port_bound_port_not_supported(self, mock_port_supported):
mock_port_supported.return_value = False
self.assertFalse(
self.mech._is_port_bound(self.mock_port_bm))
@mock.patch.object(network.Network, 'get_object')
@mock.patch.object(api.NetworkRunner, 'create_vlan')
class TestCreateNetworkPostCommit(base.NetworkingAnsibleTestCase):
def test_create_network_postcommit(self,
mock_create_network,
mock_get_network):
mock_get_network.return_value = self.mock_net
self.mech.create_network_postcommit(self.mock_net_context)
mock_create_network.assert_called_once_with(self.testhost,
self.testsegid)
def test_create_network_postcommit_manage_vlans_false(self,
mock_create_network,
mock_get_network):
self.m_config.inventory[self.testhost]['manage_vlans'] = False
self.mech.create_network_postcommit(self.mock_net_context)
mock_create_network.assert_not_called()
def test_create_network_postcommit_fails(self,
mock_create_network,
mock_get_network):
mock_create_network.side_effect = Exception()
mock_get_network.return_value = self.mock_net
self.assertRaises(netans_ml2exc.NetworkingAnsibleMechException,
self.mech.create_network_postcommit,
self.mock_net_context)
mock_create_network.assert_called_once_with(self.testhost,
self.testsegid)
def test_create_network_postcommit_not_vlan(self,
mock_create_network,
mock_get_network):
self.mock_net_context.current[provider_net.NETWORK_TYPE] = 'not-vlan'
self.mech.create_network_postcommit(self.mock_net_context)
mock_create_network.assert_not_called()
def test_create_network_postcommit_not_segmentation_id(self,
mock_create_network,
mock_get_network):
self.mock_net_context.current[provider_net.SEGMENTATION_ID] = ''
self.mech.create_network_postcommit(self.mock_net_context)
mock_create_network.assert_not_called()
def test_create_network_postcommit_was_deleted(self,
mock_create_network,
mock_get_network):
mock_get_network.return_value = None
self.mech.create_network_postcommit(self.mock_net_context)
mock_create_network.assert_not_called()
def test_create_network_postcommit_segment_was_deleted(self,
mock_create_network,
mock_get_network):
self.mock_net_context.current[provider_net.SEGMENTATION_ID] = '73'
mock_get_network.return_value = self.mock_net
self.mech.create_network_postcommit(self.mock_net_context)
mock_create_network.assert_not_called()
@mock.patch.object(network.NetworkSegment, 'get_objects')
@mock.patch.object(api.NetworkRunner, 'delete_vlan')
class TestDeleteNetworkPostCommit(base.NetworkingAnsibleTestCase):
def test_delete_network_postcommit(self,
mock_delete_network,
mock_get_segment):
mock_get_segment.return_value = []
self.mech.delete_network_postcommit(self.mock_net_context)
mock_delete_network.assert_called_once_with(self.testhost,
self.testsegid)
def test_delete_network_postcommit_manage_vlans_false(self,
mock_delete_network,
mock_get_segment):
mock_get_segment.return_value = []
self.m_config.inventory[self.testhost]['manage_vlans'] = False
self.mech.delete_network_postcommit(self.mock_net_context)
mock_delete_network.assert_not_called()
def test_delete_network_postcommit_fails(self,
mock_delete_network,
mock_get_segment):
mock_get_segment.return_value = []
mock_delete_network.side_effect = Exception()
self.assertRaises(netans_ml2exc.NetworkingAnsibleMechException,
self.mech.delete_network_postcommit,
self.mock_net_context)
mock_delete_network.assert_called_once_with(self.testhost,
self.testsegid)
def test_delete_network_postcommit_not_vlan(self,
mock_delete_network,
mock_get_segment):
mock_get_segment.return_value = []
self.mock_net_context.current[provider_net.NETWORK_TYPE] = 'not-vlan'
self.mech.delete_network_postcommit(self.mock_net_context)
mock_delete_network.assert_not_called()
def test_delete_network_postcommit_not_segmentation_id(self,
mock_delete_network,
mock_get_segment):
mock_get_segment.return_value = []
self.mock_net_context.current[provider_net.SEGMENTATION_ID] = ''
self.mech.delete_network_postcommit(self.mock_net_context)
mock_delete_network.assert_not_called()
def test_delete_network_postcommit_recreated_segment(self,
mock_delete_network,
mock_get_segment):
mock_get_segment.return_value = [self.mock_netseg]
self.mech.delete_network_postcommit(self.mock_net_context)
mock_delete_network.assert_not_called()
def test_delete_network_postcommit_different_segment(self,
mock_delete_network,
mock_get_segment):
mock_get_segment.return_value = [self.mock_netseg2]
self.mech.delete_network_postcommit(self.mock_net_context)
mock_delete_network.assert_called_once()
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver.ensure_port')
class TestDeletePortPostCommit(base.NetworkingAnsibleTestCase):
def test_delete_port_postcommit_bound(self,
mock_ensure_port):
self.mech.delete_port_postcommit(self.mock_port_context)
mock_ensure_port.assert_called_once_with(
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid,
delete=True)
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._is_port_bound')
def test_delete_port_postcommit_not_bound(self,
mock_port_bound,
mock_ensure_port):
mock_port_bound.return_value = False
self.mech.delete_port_postcommit(self.mock_port_context)
mock_ensure_port.assert_not_called()
@mock.patch(c.COORDINATION)
@mock.patch('networking_ansible.config.Config')
class TestInit(base.NetworkingAnsibleTestCase):
def test_intialize(self, m_config, m_coord):
m_coord.get_coordinator = lambda *args: mock.create_autospec(
coordination.CoordinationDriver).return_value
m_config.return_value = base.MockConfig()
self.mech.initialize()
m_config.assert_called_once_with()
def test_intialize_w_extra_params(self, m_config, m_coord):
m_coord.get_coordinator = lambda *args: mock.create_autospec(
coordination.CoordinationDriver).return_value
m_config.return_value = base.MockConfig(self.testhost,
self.testmac)
m_config.return_value.add_extra_params()
self.mech.initialize()
self.assertEqual(self.mech.kwargs,
{self.testhost: {'stp_edge': True}})
def test_intialize_w_custom_params(self, m_config, m_coord):
m_coord.get_coordinator = lambda *args: mock.create_autospec(
coordination.CoordinationDriver).return_value
m_config.return_value = base.MockConfig(self.testhost,
self.testmac)
m_config.return_value.add_custom_params()
self.mech.initialize()
self.assertEqual(self.mech.kwargs,
{self.testhost: {'custom': 'param'}})
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._is_port_bound')
@mock.patch('networking_ansible.ml2.mech_driver.provisioning_blocks',
autospec=True)
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver.ensure_port')
class TestUpdatePortPostCommit(base.NetworkingAnsibleTestCase):
def test_update_port_postcommit_port_bound_curr(self,
mock_ensure_port,
mock_prov_blocks,
mock_port_bound):
mock_port_bound.return_value = True
self.mock_port_context.original = self.mock_port_context.current
self.mech.update_port_postcommit(self.mock_port_context)
mock_prov_blocks.provisioning_complete.assert_called_once_with(
self.mock_port_context._plugin_context,
self.testid,
resources.PORT,
c.NETWORKING_ENTITY)
def test_update_port_postcommit_port_bound_orig(self,
mock_ensure_port,
mock_prov_blocks,
mock_port_bound):
mock_port_bound.side_effect = [False, True]
self.mock_port_context.original = self.mock_port_context.current
self.mech.update_port_postcommit(self.mock_port_context)
mock_ensure_port.assert_called_once_with(
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid)
def test_update_port_postcommit_port_not_bound(self,
mock_ensure_port,
mock_prov_blocks,
mock_port_bound):
mock_port_bound.return_value = False
self.mech.update_port_postcommit(self.mock_port_context)
mock_prov_blocks.provisioning_complete.assert_not_called()
def test_update_port_postcommit_port_w_normal_port(self,
mock_ensure_port,
mock_prov_blocks,
mock_port_bound):
mappings = [(self.testhost, self.testport)]
self.m_config.port_mappings = {self.test_hostid: mappings}
self.mock_port_context.current = self.mock_port_context.original
self.mech.update_port_postcommit(self.mock_port_context)
mock_ensure_port.assert_called_once_with(
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid)
def test_update_port_postcommit_port_w_direct_port(self,
mock_ensure_port,
mock_prov_blocks,
mock_port_bound):
mappings = [(self.testhost, self.testport)]
sriov_host_id = '{}-{}'.format(self.test_hostid, self.test_pci_addr)
self.m_config.port_mappings = {sriov_host_id: mappings}
self.mock_port_context.current = self.mock_port_dt
self.mock_port_context.original = self.mock_port_dt
self.mech.update_port_postcommit(self.mock_port_context)
mock_ensure_port.assert_called_once_with(
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid)
class TestLinkInfo(base.NetworkingAnsibleTestCase):
def test_switch_meta_from_link_info_obj_no_net(self):
mappings, segmentation_id = \
self.mech._switch_meta_from_link_info(self.mock_port_bm)
for switch_name, switch_port in mappings:
self.assertEqual(switch_name, self.testhost)
self.assertEqual(switch_port, self.testport)
self.assertEqual(segmentation_id, '')
def test_switch_meta_from_link_info_obj_net(self):
mappings, segmentation_id = \
self.mech._switch_meta_from_link_info(
self.mock_port_bm,
self.mock_net_context.current)
for switch_name, switch_port in mappings:
self.assertEqual(switch_name, self.testhost)
self.assertEqual(switch_port, self.testport)
self.assertEqual(segmentation_id, self.testsegid)
def test_switch_meta_from_link_info_context_no_net(self):
mappings, segmentation_id = \
self.mech._switch_meta_from_link_info(
self.mock_port_context.current)
for switch_name, switch_port in mappings:
self.assertEqual(switch_name, self.testhost)
self.assertEqual(switch_port, self.testport)
self.assertEqual(segmentation_id, '')
def test_switch_meta_from_link_info_context_net(self):
mappings, segmentation_id = \
self.mech._switch_meta_from_link_info(
self.mock_port_context.current,
self.mock_net_context.current)
for switch_name, switch_port in mappings:
self.assertEqual(switch_name, self.testhost)
self.assertEqual(switch_port, self.testport)
self.assertEqual(segmentation_id, self.testsegid)
def test_switch_meta_from_link_info_context_no_name(self):
self.mock_port_bm.bindings[0].profile = self.profile_lli_no_info
self.m_config.mac_map = {self.testmac.upper(): self.testhost + '+map'}
mappings, segmentation_id = \
self.mech._switch_meta_from_link_info(
self.mock_port_bm)
for switch_name, switch_port in mappings:
self.assertEqual(switch_name, self.testhost + '+map')
self.assertEqual(switch_port, self.testport)
self.assertEqual(segmentation_id, '')
def test_switch_meta_from_link_info_context_no_lli(self):
self.mock_port_bm.bindings[0].profile[c.LLI] = {}
self.assertRaises(netans_ml2exc.LocalLinkInfoMissingException,
self.mech._switch_meta_from_link_info,
self.mock_port_bm)
def test_link_info_from_port_port_not_supported(self):
# If this test fails from a missing key in the future
        # it's generally safe to just throw the key into this dict.
        # It's testing a case where the value passed is not a port
        # object, so we just put the port properties into a dict: the
        # properties are there, but it's not a proper port object.
port = {'id': self.testid,
portbindings.VNIC_TYPE: 'not-supported',
c.DEVICE_OWNER: c.BAREMETAL_NONE}
self.assertRaises(netans_ml2exc.LocalLinkInfoMissingException,
self.mech._switch_meta_from_link_info,
port)
@mock.patch('network_runner.api.NetworkRunner.delete_port')
class TestDeleteSwitchPort(base.NetworkingAnsibleTestCase):
def test_delete_switch_port_fails(self, mock_delete):
mock_delete.side_effect = Exception()
self.assertRaises(ml2_exc.MechanismDriverError,
self.mech._delete_switch_port,
1,
2)
def test_delete_switch_port(self, mock_delete):
self.mech._delete_switch_port(self.testhost, self.testport)
mock_delete.assert_called_once_with(self.testhost, self.testport)
@mock.patch.object(ports.Port, 'get_objects')
@mock.patch.object(network.Network, 'get_object')
class TestIsDeletedPortInUse(base.NetworkingAnsibleTestCase):
def test_is_in_use_no_ports(self,
mock_net_get_object,
mock_port_get_objects):
mock_port_get_objects.return_value = []
mock_net_get_object.return_value = self.mock_net
self.assertFalse(
self.mech._is_deleted_port_in_use(self.testphysnet, 2, 3))
def test_is_in_use_one_port(self,
mock_net_get_object,
mock_port_get_objects):
mock_port_get_objects.return_value = [self.mock_port_bm]
mock_net_get_object.return_value = self.mock_net
self.assertTrue(
self.mech._is_deleted_port_in_use(self.testphysnet, 2, 3))
def test_is_in_use_one_port_virtnet(self,
mock_net_get_object,
mock_port_get_objects):
mock_port_get_objects.return_value = [self.mock_port_bm]
mock_net_get_object.return_value = self.mock_net
self.mock_net.segments = [self.mock_netseg3]
self.assertFalse(
self.mech._is_deleted_port_in_use(self.testphysnet, 2, 3))
def test_is_in_use_two_port_virtnet_and_physnet(self,
mock_net_get_object,
mock_port_get_objects):
mock_port_get_objects.return_value = [self.mock_port_bm,
self.mock_port_vm]
self.mock_net.segments = [self.mock_netseg3]
mock_net2 = mock.create_autospec(
network.Network).return_value
mock_net2.segments = [self.mock_netseg]
mock_net_get_object.side_effect = [self.mock_net, mock_net2]
self.assertTrue(
self.mech._is_deleted_port_in_use(self.testphysnet, 2, 3))
def test_is_in_use_one_port_no_net(self,
mock_net_get_object,
mock_port_get_objects):
mock_port_get_objects.return_value = [self.mock_port_bm]
mock_net_get_object.return_value = None
self.assertFalse(
self.mech._is_deleted_port_in_use(self.testphysnet, 2, 3))
def test_is_in_use_one_port_no_binding(self,
mock_net_get_object,
mock_port_get_objects):
self.mock_port_bm.bindings.pop()
mock_port_get_objects.return_value = [self.mock_port_bm]
mock_net_get_object.return_value = self.mock_net
self.assertFalse(
self.mech._is_deleted_port_in_use(self.testphysnet, 2, 3))
def test_is_in_use_one_port_no_lli(self,
mock_net_get_object,
mock_port_get_objects):
self.mock_port_bm.bindings[0].profile = {}
mock_port_get_objects.return_value = [self.mock_port_bm]
mock_net_get_object.return_value = self.mock_net
self.assertFalse(
self.mech._is_deleted_port_in_use(self.testphysnet, 2, 3))
@mock.patch.object(coordination.CoordinationDriver, 'get_lock')
@mock.patch.object(ports.Port, 'get_object')
@mock.patch('network_runner.api.NetworkRunner.has_host')
class TestEnsurePort(base.NetworkingAnsibleTestCase):
def test_ensure_port_no_host(self,
mock_has_host,
mock_port_get_object,
mock_get_lock):
mock_has_host.return_value = False
self.assertRaises(ml2_exc.MechanismDriverError,
self.mech.ensure_port,
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid)
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._delete_switch_port')
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._is_deleted_port_in_use')
def test_ensure_port_no_port_delete(self,
mock_is_deleted,
mock_delete_port,
mock_has_host,
mock_port_get_object,
mock_get_lock):
mock_port_get_object.return_value = None
mock_is_deleted.return_value = False
self.mech.ensure_port(
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid)
mock_delete_port.assert_called_once_with(self.testhost, self.testport)
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._delete_switch_port')
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._is_deleted_port_in_use')
def test_ensure_port_no_port_keep(self,
mock_is_deleted,
mock_delete_port,
mock_has_host,
mock_port_get_object,
mock_get_lock):
mock_port_get_object.return_value = None
mock_is_deleted.return_value = True
self.mech.ensure_port(
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid)
mock_delete_port.assert_not_called()
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._set_port_state')
def test_ensure_port_set_port_state(self,
mock_set_state,
mock_has_host,
mock_port_get_object,
mock_get_lock):
mock_port_get_object.return_value = self.mock_port_bm
self.mech.ensure_port(
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid)
mock_set_state.assert_called_once_with(
self.mock_port_bm,
self.mock_port_context._plugin_context,
self.testhost,
self.testport)
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._set_port_state')
def test_ensure_port_set_port_state_binding(self,
mock_set_state,
mock_has_host,
mock_port_get_object,
mock_get_lock):
mock_port_get_object.return_value = self.mock_port_bm
mock_set_state.return_value = True
self.mech.ensure_port(
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid)
mock_set_state.assert_called_once_with(
self.mock_port_bm,
self.mock_port_context._plugin_context,
self.testhost,
self.testport)
self.mock_port_context.set_binding.assert_called_once()
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._set_port_state')
def test_ensure_port_set_port_state_no_binding(self,
mock_set_state,
mock_has_host,
mock_port_get_object,
mock_get_lock):
mock_port_get_object.return_value = self.mock_port_bm
mock_set_state.return_value = False
self.mech.ensure_port(
self.mock_port_context.current,
self.mock_port_context._plugin_context,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_context,
self.testsegid)
mock_set_state.assert_called_once_with(
self.mock_port_bm,
self.mock_port_context._plugin_context,
self.testhost,
self.testport)
self.mock_port_context.set_binding.assert_not_called()
@mock.patch('networking_ansible.ml2.mech_driver.'
'AnsibleMechanismDriver._set_port_state')
def test_ensure_port_normal_port_delete_false(self,
mock_set_state,
mock_has_host,
mock_port_get_object,
mock_get_lock):
self.mech.ensure_port(self.mock_port_vm,
self.mock_port_vm,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_vm,
self.testsegid,
delete=False)
mock_set_state.assert_called_with(self.mock_port_vm,
self.mock_port_vm,
self.testhost,
self.testport)
@mock.patch('network_runner.api.NetworkRunner.delete_trunk_vlan')
@mock.patch.object(ports.Port, 'get_objects')
def test_ensure_port_normal_port_delete_true(self,
mock_get_objects,
mock_delete_vlan,
mock_has_host,
mock_port_get_object,
mock_get_lock):
self.mech.ensure_port(self.mock_port_vm,
self.mock_port_vm,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_vm,
self.testsegid,
delete=True)
mock_delete_vlan.assert_called_with(self.testhost,
self.testport,
self.testsegid)
@mock.patch('network_runner.api.NetworkRunner.delete_trunk_vlan')
@mock.patch.object(ports.Port, 'get_objects')
def test_ensure_port_no_delete_w_active_ports_vm(self,
mock_get_objects,
mock_delete_vlan,
mock_has_host,
mock_port_get_object,
mock_get_lock):
'''
Check if there are port bindings associated with the same
compute host that the port is being deleted from. If there
are other port bindings on that compute node make sure that
the vlan won't be deleted from the compute node's switchport
        so that the other port bindings don't lose connectivity
'''
mock_get_objects.return_value = [self.mock_port_bm]
self.mech.ensure_port(self.mock_port_vm,
self.mock_port_vm,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_vm,
self.testsegid,
delete=True)
mock_delete_vlan.assert_not_called()
@mock.patch('network_runner.api.NetworkRunner.delete_trunk_vlan')
@mock.patch.object(ports.Port, 'get_objects')
def test_ensure_port_direct_port_delete_true(self,
mock_get_objects,
mock_delete_vlan,
mock_has_host,
mock_port_get_object,
mock_get_lock):
self.mech.ensure_port(self.mock_port_dt,
self.mock_port_dt,
self.testhost,
self.testport,
self.testphysnet,
self.mock_port_dt,
self.testsegid,
delete=True)
mock_delete_vlan.assert_called_with(self.testhost,
self.testport,
self.testsegid)
@mock.patch('network_runner.api.NetworkRunner.delete_trunk_vlan')
@mock.patch.object(ports.Port, 'get_objects')
@mock.patch.object(network.Network, 'get_object')
def test_ensure_port_no_delete_w_active_ports_dt(self,
mock_get_object,
mock_get_objects,
mock_delete_vlan,
mock_has_host,
mock_port_get_object,
mock_get_lock):
'''
Check if there are port bindings associated with the same
        compute host that the port is being deleted
# Copyright (c) 2016 Jiocloud.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from dss_op import *
from dss_auth import *
from jcsclient import utils
import os
import sys
import time
import hmac
import json
import base64
import requests
import exceptions
from email.utils import formatdate
import urllib2  # used below via urllib2.quote for percent-encoding object paths
import xml.sax
class ObjectOp(DSSOp):
def __init__(self):
DSSOp.__init__(self)
def parse_args(self, args):
params = {}
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--bucket', required=True)
parser.add_argument('--key', required=True)
args = parser.parse_args(args)
args_dict = vars(args)
self.bucket_name = args_dict['bucket']
self.object_name = args_dict['key']
self.dss_op_path = '/' + self.bucket_name + '/' + self.object_name
self.dss_op_path = urllib2.quote(self.dss_op_path.encode("utf8"))
def validate_args(self):
pass
def execute(self):
resp = self.make_request()
return resp
def process_result(self, result):
        # no-op currently
return result
class DeleteObjectOp(ObjectOp):
def __init__(self):
ObjectOp.__init__(self)
self.http_method = 'DELETE'
class HeadObjectOp(ObjectOp):
def __init__(self):
ObjectOp.__init__(self)
self.http_method = 'HEAD'
def process_result(self, result):
        # no-op currently, just dump a JSON in case of success
if(result is not None and result.status_code == 200):
response_json = ('{'
'"AcceptRanges": "'+ result.headers['accept-ranges'] + '",'
'"ContentType": "' + result.headers['content-type'] + '",'
'"LastModified": "' + result.headers['date'] + '",'
'"ContentLength": ' + result.headers['content-length'] + ','
'"ETag": "' + result.headers['etag'].replace('"', '\\"') + '",'
'"Metadata": {}'
'}')
self.pretty_print_json_str(response_json)
return result
class PutObjectOp(ObjectOp):
def __init__(self):
ObjectOp.__init__(self)
self.http_method = 'PUT'
def parse_args(self, args):
params = {}
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--bucket', required=True)
parser.add_argument('--key', required=True)
parser.add_argument('--body', required=True)
args = parser.parse_args(args)
args_dict = vars(args)
self.bucket_name = args_dict['bucket']
self.object_name = args_dict['key']
self.local_file_name = args_dict['body']
self.dss_op_path = '/' + self.bucket_name + '/' + self.object_name
self.dss_op_path = urllib2.quote(self.dss_op_path.encode("utf8"))
def validate_args(self):
pass
def execute(self):
# get signature
auth = DSSAuth(self.http_method, self.access_key, self.secret_key, self.dss_op_path, content_type = 'application/octet-stream')
signature = auth.get_signature()
self.http_headers['Authorization'] = signature
self.http_headers['Date'] = formatdate(usegmt=True)
statinfo = os.stat(self.local_file_name)
self.http_headers['Content-Length'] = statinfo.st_size
self.http_headers['Content-Type'] = 'application/octet-stream'
# construct request
request_url = self.dss_url + self.dss_op_path
data = open(self.local_file_name, 'rb')
# make request
resp = requests.put(request_url, headers = self.http_headers, data=data, verify = self.is_secure_request)
return resp
def process_result(self, result):
        # no-op currently, just dump a JSON in case of success
if(result is not None and result.status_code == 200):
response_json = ('{'
'"AcceptRanges": "'+ result.headers['accept-ranges'] + '",'
'"LastModified": "' + result.headers['date'] + '",'
'"ETag": "' + result.headers['etag'].replace('"', '\\"') + '"'
'}')
self.pretty_print_json_str(response_json)
return result
class GetObjectOp(ObjectOp):
def __init__(self):
ObjectOp.__init__(self)
self.http_method = 'GET'
def parse_args(self, args):
params = {}
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--bucket', required=True)
parser.add_argument('--key', required=True)
parser.add_argument('--outfile')
args = parser.parse_args(args)
args_dict = vars(args)
self.bucket_name = args_dict['bucket']
self.object_name = args_dict['key']
if(args_dict['outfile'] is not None):
self.local_file_name = args_dict['outfile']
else:
self.local_file_name = self.object_name
self.dss_op_path = '/' + self.bucket_name + '/' + self.object_name
self.dss_op_path = urllib2.quote(self.dss_op_path.encode("utf8"))
def validate_args(self):
pass
def execute(self):
# get signature
auth = DSSAuth(self.http_method, self.access_key, self.secret_key, self.dss_op_path)
signature = auth.get_signature()
self.http_headers['Authorization'] = signature
self.http_headers['Date'] = formatdate(usegmt=True)
# construct request
request_url = self.dss_url + self.dss_op_path
# make request
resp = ''
with open(self.local_file_name, 'wb') as handle:
resp = requests.get(request_url, headers = self.http_headers, stream = True, verify = self.is_secure_request)
if resp.ok:
for block in resp.iter_content(1024):
handle.write(block)
else:
resp.raise_for_status()
return resp
def process_result(self, result):
        # no-op currently, just dump a JSON in case of success
if(result is not None and result.status_code == 200):
response_json = ('{'
'"AcceptRanges": "'+ result.headers['accept-ranges'] + '",'
'"ContentType": "' + result.headers['content-type'] + '",'
'"LastModified": "' + result.headers['last-modified'] + '",'
'"ContentLength": ' + result.headers['content-length'] + ','
'"ETag": "' + result.headers['etag'].replace('"', '\\"') + '"'
'}')
self.pretty_print_json_str(response_json)
return result
class GetPresignedURLOp(ObjectOp):
def __init__(self):
ObjectOp.__init__(self)
self.http_method = 'GET'
def parse_args(self, args):
params = {}
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--bucket', required=True)
parser.add_argument('--key', required=True)
parser.add_argument('--expiry', required=True)
args = parser.parse_args(args)
args_dict = vars(args)
self.bucket_name = args_dict['bucket']
self.object_name = args_dict['key']
self.validity = args_dict['expiry']
self.dss_op_path = '/' + self.bucket_name + '/' + self.object_name
self.dss_op_path = urllib2.quote(self.dss_op_path.encode("utf8"))
def validate_args(self):
pass
def execute(self):
# get signature
expiry_time = int(time.time()) + int(self.validity)
auth = DSSAuth(self.http_method, self.access_key, self.secret_key, self.dss_op_path, use_time_in_seconds = True, expiry_time = expiry_time)
signature = auth.get_signature()
# url encode the signature
# construct url
request_url = self.dss_url + self.dss_op_path
request_url = request_url + '?JCSAccessKeyId=' + self.access_key + '&Expires=' + str(expiry_time) + '&Signature=' + urllib2.quote(signature.encode("utf8"))
response_json = '{"DownloadUrl": "' + request_url + '"}'
self.pretty_print_json_str(response_json)
resp = None
return resp
def process_result(self, result):
# no-op currently; nothing to post-process
return result
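# Illustrative sketch only (not part of the CLI): how the presigned-URL pieces above
# are assembled. The endpoint, credentials and signature below are hypothetical
# placeholders; in the real op the signature comes from DSSAuth.get_signature() with
# use_time_in_seconds, and urllib2/time are the module-level imports already used above.
def _example_presigned_url(dss_url='https://dss.example.com', access_key='AKEXAMPLE',
                           signature='abc+def=', validity=3600):
    dss_op_path = urllib2.quote('/my-bucket/my-object'.encode('utf8'))
    expiry_time = int(time.time()) + int(validity)
    return (dss_url + dss_op_path +
            '?JCSAccessKeyId=' + access_key +
            '&Expires=' + str(expiry_time) +
            '&Signature=' + urllib2.quote(signature.encode('utf8')))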
class CopyObjectOp(ObjectOp):
def __init__(self):
ObjectOp.__init__(self)
self.http_method = 'PUT'
def parse_args(self, args):
params = {}
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--bucket', required=True)
parser.add_argument('--key', required=True)
parser.add_argument('--copy-source', required=True)
args = parser.parse_args(args)
args_dict = vars(args)
self.bucket_name = args_dict['bucket']
self.object_name = args_dict['key']
self.copy_source = args_dict['copy_source']
self.dss_op_path = '/' + self.bucket_name + '/' + self.object_name
self.dss_op_path = urllib2.quote(self.dss_op_path.encode("utf8"))
def validate_args(self):
# check for valid copy source should be <bucket>/<object>
pos = self.copy_source.find('/')
if(pos == -1 or pos == 0 or pos == len(self.copy_source) - 1):
raise ValueError('copy-source should be of format <bucket-name>/<object-name>')
def execute(self):
self.http_headers['x-jcs-metadata-directive'] = 'COPY'
self.http_headers['x-jcs-copy-source'] = self.copy_source
resp = self.make_request()
return resp
def process_result(self, result):
# no-op currently; nothing to post-process
return result
class InitMPUploadOp(ObjectOp):
def __init__(self):
ObjectOp.__init__(self)
self.http_method = 'POST'
self.dss_query_str = 'uploads'
self.dss_query_str_for_signature = 'uploads'
class CancelMPUploadOp(ObjectOp):
def __init__(self):
ObjectOp.__init__(self)
self.http_method = 'DELETE'
def parse_args(self, args):
params = {}
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--bucket', required=True)
parser.add_argument('--key', required=True)
parser.add_argument('--upload-id', required=True)
args = parser.parse_args(args)
args_dict = vars(args)
self.bucket_name = args_dict['bucket']
self.object_name = args_dict['key']
self.upload_id = args_dict['upload_id']
self.dss_op_path = '/' + self.bucket_name + '/' + self.object_name
self.dss_op_path = urllib2.quote(self.dss_op_path.encode("utf8"))
self.dss_query_str = 'uploadId=' + self.upload_id
self.dss_query_str_for_signature = 'uploadId=' + self.upload_id
class ListPartsOp(ObjectOp):
def __init__(self):
ObjectOp.__init__(self)
self.http_method = 'GET'
def parse_args(self, args):
params = {}
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--bucket', required=True)
parser.add_argument('--key', required=True)
parser.add_argument('--upload-id', required=True)
args = parser.parse_args(args)
args_dict = vars(args)
self.bucket_name = args_dict['bucket']
self.object_name = args_dict['key']
self.upload_id = args_dict['upload_id']
self.dss_op_path = '/' + self.bucket_name + '/' + self.object_name
self.dss_op_path = urllib2.quote(self.dss_op_path.encode("utf8"))
self.dss_query_str = 'uploadId=' + self.upload_id
self.dss_query_str_for_signature = 'uploadId=' + self.upload_id
def execute(self):
resp = self.make_request()
return resp
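# How the multipart-upload ops above and below fit together (flow sketch; bucket, key
# and upload-id values are placeholders, and the complete-upload op is not part of
# this excerpt):
#   1. InitMPUploadOp    POST   /<bucket>/<key>?uploads                    -> returns an UploadId
#   2. UploadPartOp      PUT    /<bucket>/<key>?partNumber=N&uploadId=<id>    one call per part
#   3. ListPartsOp       GET    /<bucket>/<key>?uploadId=<id>                 inspect uploaded parts
#   4. CancelMPUploadOp  DELETE /<bucket>/<key>?uploadId=<id>                 abort the upload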
class UploadPartOp(ObjectOp):
def __init__(self):
ObjectOp.__init__(self)
self.http_method = 'PUT'
def parse_args(self, args):
params = {}
args = args[1:]
parser = utils.get_argument_parser()
parser.add_argument('--bucket', required=True)
parser.add_argument('--key', required=True)
parser.add_argument('--upload-id', required=True)
parser.add_argument('--part-number', required=True)
parser.add_argument('--body', required=True)
args = parser.parse_args(args)
args_dict = vars(args)
self.bucket_name = args_dict['bucket']
self.object_name = args_dict['key']
self.upload_id = args_dict['upload_id']
self.part_number = args_dict['part_number']
self.local_file_name = args_dict['body']
self.dss_op_path = '/' + self.bucket_name + '/' + self.object_name
self.dss_op_path = urllib2.quote(self.dss_op_path.encode("utf8"))
self.dss_query_str = 'partNumber=' + self.part_number + '&uploadId=' + self.upload_id
self.dss_query_str_for_signature = 'partNumber=' + self.part_number + '&uploadId=' + self.upload_id
def execute(self):
# get signature
query_str = 'partNumber=' + self.part_number + '&uploadId=' + self.upload_id
auth = DSSAuth(self.http_method, self.access_key, self.secret_key, self.dss_op_path, query_str = self.dss_query_str_for_signature, content_type = 'application/octet-stream')
signature = auth.get_signature()
self.http_headers['Authorization'] = signature
self.http_headers['Date'] = formatdate(usegmt=True)
statinfo = os.stat(self.local_file_name)
self.http_headers['Content-Length'] = str(statinfo.st_size)
self.http_headers['Content-Type'] = 'application/octet-stream'
# construct request
request_url = self.dss_url + self.dss_op_path
if(self.dss_query_str is not None):
request_url += '?' + self.dss_query_str
data = open(self.local_file_name, 'rb')
# make request
resp = requests.put(request_url, headers = self.http_headers, data=data, verify = self.is_secure_request)
return resp
def process_result(self, result):
# not much processing; just dump a JSON summary in case of success
if(result is not None and result.status_code == 200):
response_json = ('{'
'"ETag": | |
size = 2 ** (size_exp + 4)
start = number * size
if start >= len(self.payload):
raise error.BadRequest("Block request out of bounds")
end = start + size if start + size < len(self.payload) else len(self.payload)
more = True if end < len(self.payload) else False
payload = self.payload[start:end]
blockopt = (number, more, size_exp)
if self.code.is_request():
return self.copy(
payload=payload,
mid=None,
block1=blockopt
)
else:
return self.copy(
payload=payload,
mid=None,
block2=blockopt
)
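# Worked example of the block arithmetic above: a Block option size exponent of 6
# means a block size of 2 ** (6 + 4) = 1024 bytes, so block number 3 covers
# payload[3072:4096]; the "more" flag stays set as long as end < len(payload).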
def _append_request_block(self, next_block):
"""Modify message by appending another block"""
if not self.code.is_request():
raise ValueError("_append_request_block only works on requests.")
block1 = next_block.opt.block1
if block1.more:
if len(next_block.payload) == block1.size:
pass
elif block1.size_exponent == 7 and \
len(next_block.payload) % block1.size == 0:
pass
else:
raise error.BadRequest("Payload size does not match Block1")
if block1.start == len(self.payload):
self.payload += next_block.payload
self.opt.block1 = block1
self.token = next_block.token
self.mid = next_block.mid
if not block1.more and next_block.opt.block2 is not None:
self.opt.block2 = next_block.opt.block2
else:
# possible extension point: allow messages with "gaps"; then
# ValueError would only be raised when trying to overwrite an
# existing part; it is doubtful though that the blockwise
# specification even condones such behavior.
raise ValueError()
def _append_response_block(self, next_block):
"""Append next block to current response message.
Used when assembling incoming blockwise responses."""
if not self.code.is_response():
raise ValueError("_append_response_block only works on responses.")
block2 = next_block.opt.block2
if not block2.is_valid_for_payload_size(len(next_block.payload)):
raise error.UnexpectedBlock2("Payload size does not match Block2")
if block2.start != len(self.payload):
# Does not need to be implemented as long as the requesting code
# sequentially clocks out data
raise error.NotImplemented()
if next_block.opt.etag != self.opt.etag:
raise error.ResourceChanged()
self.payload += next_block.payload
self.opt.block2 = block2
self.token = next_block.token
self.mid = next_block.mid
def _generate_next_block2_request(self, response):
"""Generate a sub-request for next response block.
This method is used by client after receiving blockwise response from
server with "more" flag set."""
# Note: response here is the assembled response, but (due to
# _append_response_block's workings) it carries the Block2 option of
# the last received block.
next_after_received = len(response.payload) // response.opt.block2.size
blockopt = optiontypes.BlockOption.BlockwiseTuple(
next_after_received, False, response.opt.block2.size_exponent)
# has been checked in assembly, just making sure
assert blockopt.start == len(response.payload)
blockopt = blockopt.reduced_to(response.remote.maximum_block_size_exp)
return self.copy(
payload=b"",
mid=None,
token=None,
block2=blockopt,
block1=None,
observe=None
)
def _generate_next_block1_response(self):
"""Generate a response to acknowledge incoming request block.
This method is used by server after receiving blockwise request from
client with "more" flag set."""
response = Message(code=CHANGED, token=self.token)
response.remote = self.remote
if self.opt.block1.block_number == 0 and self.opt.block1.size_exponent > DEFAULT_BLOCK_SIZE_EXP:
new_size_exponent = DEFAULT_BLOCK_SIZE_EXP
response.opt.block1 = (0, True, new_size_exponent)
else:
response.opt.block1 = (self.opt.block1.block_number, True, self.opt.block1.size_exponent)
return response
#
# the message in the context of network and addresses
#
def get_request_uri(self, *, local_is_server=False):
"""The absolute URI this message belongs to.
For requests, this is composed from the options (falling back to the
remote). For responses, this is largely taken from the original request
message (so far, that could have been tracked by the requesting
application as well), but -- in case of a multicast request -- with the
host replaced by the responder's endpoint details.
This implements Section 6.5 of RFC7252.
By default, these values are only valid on the client. To determine a
message's request URI on the server, set the local_is_server argument
to True. Note that determining the request URI on the server is brittle
when behind a reverse proxy, may not be possible on all platforms, and
can only be applied to a request message in a renderer (for the
response message created by the renderer will only be populated when it
gets transmitted; simple manual copying of the request's remote to the
response will not magically make this work, for in the very case where
the request and response's URIs differ, that would not catch the
difference and still report the multicast address, while the actual
sending address will only be populated by the operating system later).
"""
# maybe this function does not belong exactly *here*, but it belongs to
# the results of .request(message), which is currently a message itself.
if self.code.is_response():
refmsg = self.request
if refmsg.remote.is_multicast:
if local_is_server:
multicast_netloc_override = self.remote.hostinfo_local
else:
multicast_netloc_override = self.remote.hostinfo
else:
multicast_netloc_override = None
else:
refmsg = self
multicast_netloc_override = None
proxyuri = refmsg.opt.proxy_uri
if proxyuri is not None:
return proxyuri
scheme = refmsg.opt.proxy_scheme or refmsg.remote.scheme
query = refmsg.opt.uri_query or ()
path = refmsg.opt.uri_path
if multicast_netloc_override is not None:
netloc = multicast_netloc_override
else:
if local_is_server:
netloc = refmsg.remote.hostinfo_local
else:
netloc = refmsg.remote.hostinfo
if refmsg.opt.uri_host is not None or \
refmsg.opt.uri_port is not None:
host, port = hostportsplit(netloc)
host = refmsg.opt.uri_host or host
port = refmsg.opt.uri_port or port
# FIXME: This sounds like it should be part of
# hostportjoin/-split
escaped_host = quote_nonascii(host)
# FIXME: "If host is not valid reg-name / IP-literal / IPv4address,
# fail"
netloc = hostportjoin(escaped_host, port)
# FIXME this should follow coap section 6.5 more closely
query = "&".join(_quote_for_query(q) for q in query)
path = ''.join("/" + _quote_for_path(p) for p in path) or '/'
fragment = None
params = "" # are they not there at all?
# Eases debugging: when they raise from urlunparse you won't know
# which it was
assert scheme is not None
assert netloc is not None
return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
def set_request_uri(self, uri, *, set_uri_host=True):
"""Parse a given URI into the uri_* fields of the options.
The remote does not get set automatically; instead, the remote data is
stored in the uri_host and uri_port options. That is because name resolution
is coupled with network specifics the protocol will know better by the
time the message is sent. Whatever sends the message, be it the
protocol itself, a proxy wrapper or an alternative transport, will know
how to handle the information correctly.
When ``set_uri_host=False`` is passed, the host/port is stored in the
``unresolved_remote`` message property instead of the uri_host option;
as a result, the unresolved host name is not sent on the wire, which
breaks virtual hosts but makes message sizes smaller.
This implements Section 6.4 of RFC7252.
"""
parsed = urllib.parse.urlparse(uri)
if parsed.fragment:
raise ValueError("Fragment identifiers can not be set on a request URI")
if parsed.scheme not in coap_schemes:
self.opt.proxy_uri = uri
return
if parsed.username or parsed.password:
raise ValueError("User name and password not supported.")
if parsed.path not in ('', '/'):
self.opt.uri_path = [urllib.parse.unquote(x) for x in parsed.path.split('/')[1:]]
else:
self.opt.uri_path = []
if parsed.query:
self.opt.uri_query = [urllib.parse.unquote(x) for x in parsed.query.split('&')]
else:
self.opt.uri_query = []
self.remote = UndecidedRemote(parsed.scheme, parsed.netloc)
is_ip_literal = parsed.netloc.startswith('[') or (
parsed.hostname.count('.') == 3 and
all(c in '0123456789.' for c in parsed.hostname) and
all(int(x) <= 255 for x in parsed.hostname.split('.')))
if set_uri_host and not is_ip_literal:
self.opt.uri_host = urllib.parse.unquote(parsed.hostname).translate(_ascii_lowercase)
# Deprecated accessors to moved functionality
@property
def unresolved_remote(self):
return self.remote.hostinfo
@unresolved_remote.setter
def unresolved_remote(self, value):
# should get a big fat deprecation warning
if value is None:
self.remote = UndecidedRemote('coap', None)
else:
self.remote = UndecidedRemote('coap', value)
@property
def requested_scheme(self):
if self.code.is_request():
return self.remote.scheme
else:
return self.request.requested_scheme
@requested_scheme.setter
def requested_scheme(self, value):
self.remote = UndecidedRemote(value, self.remote.hostinfo)
@property
def requested_proxy_uri(self):
return self.request.opt.proxy_uri
@property
def requested_hostinfo(self):
return self.request.opt.uri_host or self.request.unresolved_remote
@property
def requested_path(self):
return self.request.opt.uri_path
@property
def requested_query(self):
return self.request.opt.uri_query
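# Rough usage sketch for the URI handling above (values illustrative, and the exact
# option container types may differ; GET is assumed to be importable like CHANGED):
#   msg = Message(code=GET)
#   msg.set_request_uri("coap://example.net/sensors/temp?unit=c")
#   msg.opt.uri_path  -> ['sensors', 'temp']
#   msg.opt.uri_query -> ['unit=c']
#   msg.remote        -> UndecidedRemote(scheme='coap', hostinfo='example.net')
#   msg.get_request_uri() -> "coap://example.net/sensors/temp?unit=c"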
class UndecidedRemote(
namedtuple("_UndecidedRemote", ("scheme", "hostinfo")),
interfaces.EndpointAddress
):
"""Remote that is set on messages that have not been sent through any any
transport.
It describes scheme, hostname and port that were set in
:meth:`.set_request_uri()` or when setting a URI per Message constructor.
* :attr:`scheme`: The scheme string
* :attr:`hostinfo`: The authority component of the URI, as it would occur
in the URI.
"""
@classmethod
def from_pathless_uri(cls, uri: str) -> UndecidedRemote:
"""Create an UndecidedRemote for a given URI that has no query, path,
fragment or other components not expressed in an UndecidedRemote
>>> from aiocoap.message import UndecidedRemote
>>> UndecidedRemote.from_pathless_uri("coap://localhost")
UndecidedRemote(scheme='coap', hostinfo='localhost')
>>> UndecidedRemote.from_pathless_uri("coap+tcp://[::1]:1234")
UndecidedRemote(scheme='coap+tcp', hostinfo='[::1]:1234')
"""
parsed = urllib.parse.urlparse(uri)
if parsed.username or parsed.password:
raise ValueError("User name and password not supported.")
if parsed.path not in ('', '/') or parsed.query or parsed.fragment:
raise ValueError("Paths and query and fragment can | |
res = {}
for i in t[0:i2i_sim_limit]:
res[i[0]]=i[1]
sim_item[key] = res
user2recall = {}
for user,qtime in zip(df['user_id'],df['time']):
user2recall[(user,qtime)] = recommend(sim_item,user_item_dict,user_time_dict,user,qtime,0.7,'i2iw10')
if len(user2recall) % 100 ==0:
print(len(user2recall))
phase_ndcg_pred_answer = []
answers_source = []
for predict_stage in range(cur_stage+1):
predictions = []
df_now = df_stage[df_stage['stage'] == predict_stage]
df_train = df_train_stage[df_train_stage['stage'] == predict_stage]
stage_items = set(df_train['item_id'])
cur_user_item_dict = user_item_dict
print(f'i2i_w10 recall start {predict_stage}')
for user_id,it,qtime in zip(df_now['user_id'],df_now['item_id'],df_now['time']):
recall_items = user2recall[(user_id,qtime)]
new_recall = []
for re in recall_items:
if re[0] == it:
new_recall.append(re)
elif (user_id not in cur_user_item_dict) or (re[0] not in cur_user_item_dict[user_id]):
if re[0] in stage_items:
new_recall.append(re)
predictions.append(new_recall)
if len(predictions)%1000 == 0:
tot = len(df_now['user_id'])
print(f'now: {len(predictions)}, tot: {tot}')
phase_ndcg_pred_answer.append( predictions )
if get_sum:
utils.dump_pickle( phase_ndcg_pred_answer, i2i_w10_recall_scoure_path.format( mode, cur_stage, 'sum' ) )
else:
utils.dump_pickle( phase_ndcg_pred_answer, i2i_w10_recall_scoure_path.format( mode, cur_stage, 'nosum' ) )
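# Note on the recall post-filter used above and in each *_recall function below: a
# candidate is kept when it is the ground-truth item itself, or when the user has not
# interacted with it before and it appears in the current stage's training items.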
def b2b2b_recall(df_train, df_train_stage, df, df_stage):
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
blend_sim = utils.load_sim(item_blend_sim_path)
blend_score = {}
for item in blend_sim:
i = item[0]
blend_score.setdefault(i,{})
for j,cij in item[1][:b2b2b_sim_limit]:
blend_score[i][j] = cij
import time
t1 = time.time()
blend_score_2 = {}
for idx,item1 in enumerate( blend_score.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(blend_score.keys())}' )
t1 = t2
blend_score_2.setdefault(item1, {})
for item2 in blend_score[item1].keys():
if item2 == item1:
continue
for item3 in blend_score[item2].keys():
if item3 == item1 or item3 == item2:
continue
blend_score_2[item1].setdefault(item3, 0)
blend_score_2[item1][item3] += blend_score[item1][item2]*blend_score[item2][item3]
user2recall_blendsim = {}
for user,qtime in zip(df['user_id'],df['time']):
user2recall_blendsim[(user,qtime)] = recommend(blend_score_2,user_item_dict,user_time_dict,user,qtime,0.7,'b2b2b')
if len(user2recall_blendsim) % 100 ==0:
print(len(user2recall_blendsim))
phase_ndcg_pred_answer = []
phase_item_degree = utils.load_pickle(phase_full_item_degree_path.format(cur_stage))
for predict_stage in range(cur_stage+1):
df_now = df_stage[df_stage['stage'] == predict_stage]
df_train = df_train_stage[df_train_stage['stage'] == predict_stage]
stage_items = set(df_train['item_id'])
cur_user_item_dict = user_item_dict
blend_predictions = []
print(f'b2b2b recall start {predict_stage}')
for user_id,it,qtime in zip(df_now['user_id'],df_now['item_id'],df_now['time']):
recall_items = user2recall_blendsim[(user_id,qtime)]
new_recall = []
for re in recall_items:
if re[0] == it:
new_recall.append(re)
elif (user_id not in cur_user_item_dict) or (re[0] not in cur_user_item_dict[user_id]):
if re[0] in stage_items:# and re in feat_item_set:
new_recall.append(re)
blend_predictions.append(new_recall)
if len(blend_predictions)%1000 == 0:
tot = len(df_now['user_id'])
print(f'now: {len(blend_predictions)}, tot: {tot}')
phase_ndcg_pred_answer.append( blend_predictions )
if get_sum:
utils.dump_pickle( phase_ndcg_pred_answer, b2b2b_recall_scoure_path.format( mode, cur_stage, 'sum' ) )
else:
utils.dump_pickle( phase_ndcg_pred_answer, b2b2b_recall_scoure_path.format( mode, cur_stage, 'nosum' ) )
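# Minimal sketch of the two-hop similarity composition used above (and in the
# i2i2b/b2b2i variants below): the composed score accumulates score_a[i][j] *
# score_b[j][k] over shared middle items j, skipping trivial pairs. Toy dictionaries
# only; not tied to the real blend/item similarities.
def _compose_similarity(score_a, score_b):
    composed = {}
    for i, neighbours in score_a.items():
        composed.setdefault(i, {})
        for j, s_ij in neighbours.items():
            if j == i:
                continue
            for k, s_jk in score_b.get(j, {}).items():
                if k == i or k == j:
                    continue
                composed[i][k] = composed[i].get(k, 0.0) + s_ij * s_jk
    return composed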
def i2i2b_recall(df_train, df_train_stage, df, df_stage):
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
for i, related_items in sim_item.items():
for j, cij in related_items.items():
sim_item[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item.keys():
t = sim_item[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:i2i2b_i_sim_limit]:
res[i[0]]=i[1]
sim_item[key] = res
blend_sim = utils.load_sim(item_blend_sim_path)
blend_score = {}
for item in blend_sim:
i = item[0]
blend_score.setdefault(i,{})
for j,cij in item[1][:i2i2b_b_sim_limit]:
blend_score[i][j] = cij
import time
t1 = time.time()
blend_score_2 = {}
for idx,item1 in enumerate( sim_item.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item.keys())}' )
t1 = t2
blend_score_2.setdefault(item1, {})
for item2 in sim_item[item1].keys():
if item2 == item1:
continue
if item2 in blend_score.keys():
for item3 in blend_score[item2].keys():
if item3 == item1 or item3 == item2:
continue
blend_score_2[item1].setdefault(item3, 0)
blend_score_2[item1][item3] += sim_item[item1][item2]*blend_score[item2][item3]
user2recall_blendsim = {}
for user,qtime in zip(df['user_id'],df['time']):
user2recall_blendsim[(user,qtime)] = recommend(blend_score_2,user_item_dict,user_time_dict,user,qtime,0.7,'i2i2b')
if len(user2recall_blendsim) % 100 ==0:
print(len(user2recall_blendsim))
phase_ndcg_pred_answer = []
phase_item_degree = utils.load_pickle(phase_full_item_degree_path.format(cur_stage))
for predict_stage in range(cur_stage+1):
df_now = df_stage[df_stage['stage'] == predict_stage]
df_train = df_train_stage[df_train_stage['stage'] == predict_stage]
stage_items = set(df_train['item_id'])
cur_user_item_dict = user_item_dict
blend_predictions = []
print(f'i2i2b recall start {predict_stage}')
for user_id,it,qtime in zip(df_now['user_id'],df_now['item_id'],df_now['time']):
recall_items = user2recall_blendsim[(user_id,qtime)]
new_recall = []
for re in recall_items:
if re[0] == it:
new_recall.append(re)
elif (user_id not in cur_user_item_dict) or (re[0] not in cur_user_item_dict[user_id]):
if re[0] in stage_items:# and re in feat_item_set:
new_recall.append(re)
blend_predictions.append(new_recall)
if len(blend_predictions)%1000 == 0:
tot = len(df_now['user_id'])
print(f'now: {len(blend_predictions)}, tot: {tot}')
phase_ndcg_pred_answer.append( blend_predictions )
if get_sum:
utils.dump_pickle( phase_ndcg_pred_answer, i2i2b_recall_scoure_path.format( mode, cur_stage, 'sum' ) )
else:
utils.dump_pickle( phase_ndcg_pred_answer, i2i2b_recall_scoure_path.format( mode, cur_stage, 'nosum' ) )
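# Worked example of the weights above: for a forward pair (loc1 > loc2) with
# t1 - t2 = 0.002 and loc1 - loc2 = 3, time_weight = 1 - 0.002 * 100 = 0.8 and
# loc_weight = 0.9 ** (3 - 1) = 0.81; both are floored at 0.2, and the contribution
# is further divided by log(1 + len(items)) to damp very active users.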
def b2b2i_recall(df_train, df_train_stage, df, df_stage):
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
for i, related_items in sim_item.items():
for j, cij in related_items.items():
sim_item[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item.keys():
t = sim_item[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:b2b2i_i_sim_limit]:
res[i[0]]=i[1]
sim_item[key] = res
blend_sim = utils.load_sim(item_blend_sim_path)
blend_score = {}
for item in blend_sim:
i = item[0]
blend_score.setdefault(i,{})
for j,cij in item[1][:b2b2i_b_sim_limit]:
blend_score[i][j] = cij
import time
t1 = time.time()
blend_score_2 = {}
for idx,item1 in enumerate( blend_score.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(blend_score.keys())}' )
t1 = t2
blend_score_2.setdefault(item1, {})
for item2 in blend_score[item1].keys():
if item2 == item1:
continue
if item2 in sim_item.keys():
for item3 in sim_item[item2].keys():
if item3 == item1 or item3 == item2:
continue
blend_score_2[item1].setdefault(item3, 0)
blend_score_2[item1][item3] += blend_score[item1][item2]*sim_item[item2][item3]
user2recall_blendsim = {}
for user,qtime in zip(df['user_id'],df['time']):
user2recall_blendsim[(user,qtime)] = recommend(blend_score_2,user_item_dict,user_time_dict,user,qtime,0.7,'b2b2i')
if len(user2recall_blendsim) % 100 ==0:
print(len(user2recall_blendsim))
phase_ndcg_pred_answer = []
phase_item_degree = utils.load_pickle(phase_full_item_degree_path.format(cur_stage))
for predict_stage in range(cur_stage+1):
df_now = df_stage[df_stage['stage'] == predict_stage]
df_train = df_train_stage[df_train_stage['stage'] == predict_stage]
stage_items = set(df_train['item_id'])
cur_user_item_dict = user_item_dict
blend_predictions = []
print(f'b2b2i recall start {predict_stage}')
for user_id,it,qtime in zip(df_now['user_id'],df_now['item_id'],df_now['time']):
recall_items = user2recall_blendsim[(user_id,qtime)]
new_recall = []
for re in recall_items:
if re[0] == it:
new_recall.append(re)
elif (user_id not in cur_user_item_dict) or (re[0] not in cur_user_item_dict[user_id]):
if re[0] in stage_items:# and re in feat_item_set:
new_recall.append(re)
blend_predictions.append(new_recall)
if len(blend_predictions)%1000 == 0:
tot = len(df_now['user_id'])
print(f'now: {len(blend_predictions)}, tot: {tot}')
phase_ndcg_pred_answer.append( blend_predictions )
if get_sum:
utils.dump_pickle( phase_ndcg_pred_answer, b2b2i_recall_scoure_path.format( mode, cur_stage, 'sum' ) )
else:
utils.dump_pickle( phase_ndcg_pred_answer, b2b2i_recall_scoure_path.format( mode, cur_stage, 'nosum' ) )
def b2bl2_recall(df_train, df_train_stage, df, df_stage):
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
blend_sim = utils.load_sim(item_text_l2_sim_path)
blend_score = {}
for item in blend_sim:
i = item[0]
blend_score.setdefault(i,{})
for j,cij in item[1][:b2b_sim_limit]:
blend_score[i][j] = cij
user2recall_blendsim = {}
for user,qtime in zip(df['user_id'],df['time']):
| |
res[1] or False
class stock_production_lot(osv.osv):
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['name', 'prefix', 'ref'], context)
res = []
for record in reads:
name = record['name']
prefix = record['prefix']
if prefix:
name = prefix + '/' + name
if record['ref']:
name = '%s [%s]' % (name, record['ref'])
res.append((record['id'], name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
args = args or []
ids = []
if name:
ids = self.search(cr, uid, [('prefix', '=', name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
_name = 'stock.production.lot'
_description = 'Serial Number'
def _get_stock(self, cr, uid, ids, field_name, arg, context=None):
""" Gets stock of products for locations
@return: Dictionary of values
"""
if context is None:
context = {}
if 'location_id' not in context:
locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')], context=context)
else:
locations = context['location_id'] and [context['location_id']] or []
if isinstance(ids, (int, long)):
ids = [ids]
res = {}.fromkeys(ids, 0.0)
if locations:
cr.execute('''select
prodlot_id,
sum(qty)
from
stock_report_prodlots
where
location_id IN %s and prodlot_id IN %s group by prodlot_id''',(tuple(locations),tuple(ids),))
res.update(dict(cr.fetchall()))
return res
def _stock_search(self, cr, uid, obj, name, args, context=None):
""" Searches Ids of products
@return: Ids of locations
"""
locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')])
cr.execute('''select
prodlot_id,
sum(qty)
from
stock_report_prodlots
where
location_id IN %s group by prodlot_id
having sum(qty) '''+ str(args[0][1]) + str(args[0][2]),(tuple(locations),))
res = cr.fetchall()
ids = [('id', 'in', map(lambda x: x[0], res))]
return ids
_columns = {
'name': fields.char('Serial Number', size=64, required=True, help="Unique Serial Number, will be displayed as: PREFIX/SERIAL [INT_REF]"),
'ref': fields.char('Internal Reference', size=256, help="Internal reference number in case it differs from the manufacturer's serial number"),
'prefix': fields.char('Prefix', size=64, help="Optional prefix to prepend when displaying this serial number: PREFIX/SERIAL [INT_REF]"),
'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
'date': fields.datetime('Creation Date', required=True),
'stock_available': fields.function(_get_stock, fnct_search=_stock_search, type="float", string="Available", select=True,
help="Current quantity of products with this Serial Number available in company warehouses",
digits_compute=dp.get_precision('Product Unit of Measure')),
'revisions': fields.one2many('stock.production.lot.revision', 'lot_id', 'Revisions'),
'company_id': fields.many2one('res.company', 'Company', select=True),
'move_ids': fields.one2many('stock.move', 'prodlot_id', 'Moves for this serial number', readonly=True),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
'product_id': lambda x, y, z, c: c.get('product_id', False),
}
_sql_constraints = [
('name_ref_uniq', 'unique (name, ref)', 'The combination of Serial Number and internal reference must be unique !'),
]
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of a product
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
value=self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
return value
def copy(self, cr, uid, id, default=None, context=None):
context = context or {}
default = default and default.copy() or {}
default.update(date=time.strftime('%Y-%m-%d %H:%M:%S'), move_ids=[])
return super(stock_production_lot, self).copy(cr, uid, id, default=default, context=context)
stock_production_lot()
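# Example of the display format produced by name_get above: prefix='LOT',
# name='000123', ref='ACME-7' renders as 'LOT/000123 [ACME-7]'; without a prefix or
# internal reference only the bare serial number is shown.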
class stock_production_lot_revision(osv.osv):
_name = 'stock.production.lot.revision'
_description = 'Serial Number Revision'
_columns = {
'name': fields.char('Revision Name', size=64, required=True),
'description': fields.text('Description'),
'date': fields.date('Revision Date'),
'indice': fields.char('Revision Number', size=16),
'author_id': fields.many2one('res.users', 'Author'),
'lot_id': fields.many2one('stock.production.lot', 'Serial Number', select=True, ondelete='cascade'),
'company_id': fields.related('lot_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
}
_defaults = {
'author_id': lambda x, y, z, c: z,
'date': fields.date.context_today,
}
stock_production_lot_revision()
# ----------------------------------------------------
# Move
# ----------------------------------------------------
#
# Fields:
# location_dest_id is only used for predicting future stocks
#
class stock_move(osv.osv):
def _getSSCC(self, cr, uid, context=None):
cr.execute('select id from stock_tracking where create_uid=%s order by id desc limit 1', (uid,))
res = cr.fetchone()
return (res and res[0]) or False
_name = "stock.move"
_description = "Stock Move"
_order = 'date_expected desc, id'
_log_create = False
def action_partial_move(self, cr, uid, ids, context=None):
if context is None: context = {}
if context.get('active_model') != self._name:
context.update(active_ids=ids, active_model=self._name)
partial_id = self.pool.get("stock.partial.move").create(
cr, uid, {}, context=context)
return {
'name':_("Products to Process"),
'view_mode': 'form',
'view_id': False,
'view_type': 'form',
'res_model': 'stock.partial.move',
'res_id': partial_id,
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'new',
'domain': '[]',
'context': context
}
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
name = line.location_id.name+' > '+line.location_dest_id.name
# optional prefixes
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
def _check_tracking(self, cr, uid, ids, context=None):
""" Checks if serial number is assigned to stock move or not.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if not move.prodlot_id and \
(move.state == 'done' and \
( \
(move.product_id.track_production and move.location_id.usage == 'production') or \
(move.product_id.track_production and move.location_dest_id.usage == 'production') or \
(move.product_id.track_incoming and move.location_id.usage == 'supplier') or \
(move.product_id.track_outgoing and move.location_dest_id.usage == 'customer') or \
(move.product_id.track_incoming and move.location_id.usage == 'inventory') \
)):
return False
return True
def _check_product_lot(self, cr, uid, ids, context=None):
""" Checks whether move is done or not and production lot is assigned to that move.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if move.prodlot_id and move.state == 'done' and (move.prodlot_id.product_id.id != move.product_id.id):
return False
return True
_columns = {
'name': fields.char('Description', required=True, select=True),
'priority': fields.selection([('0', 'Not urgent'), ('1', 'Urgent')], 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Scheduled Date', states={'done': [('readonly', True)]},required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type','<>','service')],states={'done': [('readonly', True)]}),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True,states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care."
),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True,states={'done': [('readonly', True)]}),
'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]}),
'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
'product_packaging': fields.many2one('product.packaging', 'Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True,states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True,states={'done': [('readonly', True)]}, select=True, help="Location where the system will stock the finished products."),
'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
'prodlot_id': fields.many2one('stock.production.lot', 'Serial Number', states={'done': [('readonly', True)]}, help="Serial number is used to put a serial number on the production", select=True),
'tracking_id': fields.many2one('stock.tracking', 'Pack', select=True, states={'done': [('readonly', True)]}, help="Logistical shipping unit: pallet, box, pack ..."),
'auto_validate': fields.boolean('Auto Validate'),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True),
'move_history_ids': fields.many2many('stock.move', 'stock_move_history_ids', 'parent_id', 'child_id', 'Move History (child moves)'),
'move_history_ids2': fields.many2many('stock.move', 'stock_move_history_ids', 'child_id', 'parent_id', 'Move History (parent moves)'),
'picking_id': fields.many2one('stock.picking', 'Reference', select=True,states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('assigned', 'Available'),
('done', 'Done'),
], 'Status', readonly=True, select=True,
help= "* New: When the stock move is created and not yet confirmed.\n"\
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
"* Available: When products are reserved, it is set to \'Available\'.\n"\
"* Done: When the shipment is processed, the state is \'Done\'."),
'price_unit': fields.float('Unit Price', digits_compute= dp.get_precision('Product Price'), help="Technical field used to record the product cost set by the user during a picking confirmation (when average price costing method | |
.. attribute:: brief_pcep_information
Brief PCE protocol information
**type**\: :py:class:`BriefPcepInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.BriefPcepInformation>`
.. attribute:: last_error_rx
Last PCError received
**type**\: :py:class:`LastErrorRx <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorRx>`
.. attribute:: last_error_tx
Last PCError sent
**type**\: :py:class:`LastErrorTx <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorTx>`
.. attribute:: error
Error (for display only)
**type**\: str
.. attribute:: speaker_id
Speaker Entity ID
**type**\: str
.. attribute:: pcep_up_time
PCEP Up Time
**type**\: int
**range:** 0..4294967295
.. attribute:: keepalives
Keepalive count
**type**\: int
**range:** 0..4294967295
.. attribute:: md5_enabled
MD5 Authentication Enabled
**type**\: bool
.. attribute:: keychain_enabled
Keychain based Authentication Enabled
**type**\: bool
.. attribute:: negotiated_local_keepalive
Negotiated KA
**type**\: int
**range:** 0..4294967295
.. attribute:: negotiated_remote_keepalive
Negotiated KA
**type**\: int
**range:** 0..4294967295
.. attribute:: negotiated_dead_time
Negotiated DT
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_request_rx
PCEReq Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_request_tx
PCEReq Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_reply_rx
PCERep Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_reply_tx
PCERep Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_error_rx
PCEErr Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_error_tx
PCEErr Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_open_tx
PCEOpen Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_open_rx
PCEOpen Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_report_rx
PCERpt Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_report_tx
PCERpt Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_update_rx
PCEUpd Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_update_tx
PCEUpd Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_initiate_rx
PCEInit Rx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_initiate_tx
PCEInit Tx
**type**\: int
**range:** 0..4294967295
.. attribute:: pce_keepalive_tx
PCE Keepalive Tx
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: pce_keepalive_rx
PCE Keepalive Rx
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: local_session_id
Local PCEP session ID
**type**\: int
**range:** 0..255
.. attribute:: remote_session_id
Remote PCEP session ID
**type**\: int
**range:** 0..255
.. attribute:: minimum_keepalive_interval
Minimum keepalive interval for the peer
**type**\: int
**range:** 0..255
.. attribute:: maximum_dead_interval
Maximum dead interval for the peer
**type**\: int
**range:** 0..255
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation, self).__init__()
self.yang_name = "detail-pcep-information"
self.yang_parent_name = "peer-detail-info"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("brief-pcep-information", ("brief_pcep_information", PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.BriefPcepInformation)), ("last-error-rx", ("last_error_rx", PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorRx)), ("last-error-tx", ("last_error_tx", PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorTx))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('error', YLeaf(YType.str, 'error')),
('speaker_id', YLeaf(YType.str, 'speaker-id')),
('pcep_up_time', YLeaf(YType.uint32, 'pcep-up-time')),
('keepalives', YLeaf(YType.uint32, 'keepalives')),
('md5_enabled', YLeaf(YType.boolean, 'md5-enabled')),
('keychain_enabled', YLeaf(YType.boolean, 'keychain-enabled')),
('negotiated_local_keepalive', YLeaf(YType.uint32, 'negotiated-local-keepalive')),
('negotiated_remote_keepalive', YLeaf(YType.uint32, 'negotiated-remote-keepalive')),
('negotiated_dead_time', YLeaf(YType.uint32, 'negotiated-dead-time')),
('pce_request_rx', YLeaf(YType.uint32, 'pce-request-rx')),
('pce_request_tx', YLeaf(YType.uint32, 'pce-request-tx')),
('pce_reply_rx', YLeaf(YType.uint32, 'pce-reply-rx')),
('pce_reply_tx', YLeaf(YType.uint32, 'pce-reply-tx')),
('pce_error_rx', YLeaf(YType.uint32, 'pce-error-rx')),
('pce_error_tx', YLeaf(YType.uint32, 'pce-error-tx')),
('pce_open_tx', YLeaf(YType.uint32, 'pce-open-tx')),
('pce_open_rx', YLeaf(YType.uint32, 'pce-open-rx')),
('pce_report_rx', YLeaf(YType.uint32, 'pce-report-rx')),
('pce_report_tx', YLeaf(YType.uint32, 'pce-report-tx')),
('pce_update_rx', YLeaf(YType.uint32, 'pce-update-rx')),
('pce_update_tx', YLeaf(YType.uint32, 'pce-update-tx')),
('pce_initiate_rx', YLeaf(YType.uint32, 'pce-initiate-rx')),
('pce_initiate_tx', YLeaf(YType.uint32, 'pce-initiate-tx')),
('pce_keepalive_tx', YLeaf(YType.uint64, 'pce-keepalive-tx')),
('pce_keepalive_rx', YLeaf(YType.uint64, 'pce-keepalive-rx')),
('local_session_id', YLeaf(YType.uint8, 'local-session-id')),
('remote_session_id', YLeaf(YType.uint8, 'remote-session-id')),
('minimum_keepalive_interval', YLeaf(YType.uint8, 'minimum-keepalive-interval')),
('maximum_dead_interval', YLeaf(YType.uint8, 'maximum-dead-interval')),
])
self.error = None
self.speaker_id = None
self.pcep_up_time = None
self.keepalives = None
self.md5_enabled = None
self.keychain_enabled = None
self.negotiated_local_keepalive = None
self.negotiated_remote_keepalive = None
self.negotiated_dead_time = None
self.pce_request_rx = None
self.pce_request_tx = None
self.pce_reply_rx = None
self.pce_reply_tx = None
self.pce_error_rx = None
self.pce_error_tx = None
self.pce_open_tx = None
self.pce_open_rx = None
self.pce_report_rx = None
self.pce_report_tx = None
self.pce_update_rx = None
self.pce_update_tx = None
self.pce_initiate_rx = None
self.pce_initiate_tx = None
self.pce_keepalive_tx = None
self.pce_keepalive_rx = None
self.local_session_id = None
self.remote_session_id = None
self.minimum_keepalive_interval = None
self.maximum_dead_interval = None
self.brief_pcep_information = PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.BriefPcepInformation()
self.brief_pcep_information.parent = self
self._children_name_map["brief_pcep_information"] = "brief-pcep-information"
self._children_yang_names.add("brief-pcep-information")
self.last_error_rx = PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorRx()
self.last_error_rx.parent = self
self._children_name_map["last_error_rx"] = "last-error-rx"
self._children_yang_names.add("last-error-rx")
self.last_error_tx = PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorTx()
self.last_error_tx.parent = self
self._children_name_map["last_error_tx"] = "last-error-tx"
self._children_yang_names.add("last-error-tx")
self._segment_path = lambda: "detail-pcep-information"
def __setattr__(self, name, value):
self._perform_setattr(PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation, ['error', 'speaker_id', 'pcep_up_time', 'keepalives', 'md5_enabled', 'keychain_enabled', 'negotiated_local_keepalive', 'negotiated_remote_keepalive', 'negotiated_dead_time', 'pce_request_rx', 'pce_request_tx', 'pce_reply_rx', 'pce_reply_tx', 'pce_error_rx', 'pce_error_tx', 'pce_open_tx', 'pce_open_rx', 'pce_report_rx', 'pce_report_tx', 'pce_update_rx', 'pce_update_tx', 'pce_initiate_rx', 'pce_initiate_tx', 'pce_keepalive_tx', 'pce_keepalive_rx', 'local_session_id', 'remote_session_id', 'minimum_keepalive_interval', 'maximum_dead_interval'], name, value)
class BriefPcepInformation(Entity):
"""
Brief PCE protocol information
.. attribute:: pcep_state
PCEP State
**type**\: :py:class:`PcepState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PcepState>`
.. attribute:: stateful
Stateful
**type**\: bool
.. attribute:: capability_update
Update capability
**type**\: bool
.. attribute:: capability_instantiate
Instantiation capability
**type**\: bool
.. attribute:: capability_segment_routing
Segment Routing capability
**type**\: bool
.. attribute:: capability_triggered_sync
Triggered Synchronization capability
**type**\: bool
.. attribute:: capability_db_version
DB version capability
**type**\: bool
.. attribute:: capability_delta_sync
Delta Synchronization capability
**type**\: bool
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.BriefPcepInformation, self).__init__()
self.yang_name = "brief-pcep-information"
self.yang_parent_name = "detail-pcep-information"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('pcep_state', YLeaf(YType.enumeration, 'pcep-state')),
('stateful', YLeaf(YType.boolean, 'stateful')),
('capability_update', YLeaf(YType.boolean, 'capability-update')),
('capability_instantiate', YLeaf(YType.boolean, 'capability-instantiate')),
('capability_segment_routing', YLeaf(YType.boolean, 'capability-segment-routing')),
('capability_triggered_sync', YLeaf(YType.boolean, 'capability-triggered-sync')),
('capability_db_version', YLeaf(YType.boolean, 'capability-db-version')),
('capability_delta_sync', YLeaf(YType.boolean, 'capability-delta-sync')),
])
self.pcep_state = None
self.stateful = None
self.capability_update = None
self.capability_instantiate = None
self.capability_segment_routing = None
self.capability_triggered_sync = None
self.capability_db_version = None
self.capability_delta_sync = None
self._segment_path = lambda: "brief-pcep-information"
def __setattr__(self, name, value):
self._perform_setattr(PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.BriefPcepInformation, ['pcep_state', 'stateful', 'capability_update', 'capability_instantiate', 'capability_segment_routing', 'capability_triggered_sync', 'capability_db_version', 'capability_delta_sync'], name, value)
class LastErrorRx(Entity):
"""
Last PCError received
.. attribute:: pc_error_type
PCEP Error type
**type**\: int
**range:** 0..255
.. attribute:: pc_error_value
PCEP Error Value
**type**\: int
**range:** 0..255
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorRx, self).__init__()
self.yang_name = "last-error-rx"
self.yang_parent_name = "detail-pcep-information"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('pc_error_type', YLeaf(YType.uint8, 'pc-error-type')),
('pc_error_value', YLeaf(YType.uint8, 'pc-error-value')),
])
self.pc_error_type = None
self.pc_error_value = None
self._segment_path = lambda: "last-error-rx"
def __setattr__(self, name, value):
self._perform_setattr(PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorRx, ['pc_error_type', 'pc_error_value'], name, value)
class LastErrorTx(Entity):
"""
Last PCError sent
.. attribute:: pc_error_type
PCEP Error type
**type**\: int
**range:** 0..255
.. attribute:: pc_error_value
PCEP Error Value
**type**\: int
**range:** 0..255
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorTx, self).__init__()
self.yang_name = "last-error-tx"
self.yang_parent_name = "detail-pcep-information"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('pc_error_type', YLeaf(YType.uint8, 'pc-error-type')),
('pc_error_value', YLeaf(YType.uint8, 'pc-error-value')),
])
self.pc_error_type = None
self.pc_error_value = None
self._segment_path = lambda: "last-error-tx"
def __setattr__(self, name, value):
self._perform_setattr(PcePeer.PeerDetailInfos.PeerDetailInfo.DetailPcepInformation.LastErrorTx, ['pc_error_type', 'pc_error_value'], name, value)
class PeerInfos(Entity):
"""
Peers database in XTC
.. attribute:: peer_info
PCE peer information
**type**\: list of :py:class:`PeerInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PcePeer.PeerInfos.PeerInfo>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PcePeer.PeerInfos, self).__init__()
self.yang_name = "peer-infos"
self.yang_parent_name = "pce-peer"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("peer-info", ("peer_info", PcePeer.PeerInfos.PeerInfo))])
self._leafs = OrderedDict()
self.peer_info = YList(self)
self._segment_path = lambda: "peer-infos"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce-peer/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(PcePeer.PeerInfos, [], name, value)
class PeerInfo(Entity):
"""
PCE peer information
.. attribute:: peer_address (key)
Peer Address
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: brief_pcep_information
PCE protocol information
**type**\: :py:class:`BriefPcepInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PcePeer.PeerInfos.PeerInfo.BriefPcepInformation>`
.. attribute:: peer_address_xr
Peer address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: peer_protocol
Protocol between PCE and peer
**type**\: :py:class:`PceProto <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceProto>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PcePeer.PeerInfos.PeerInfo, self).__init__()
self.yang_name = "peer-info"
self.yang_parent_name = "peer-infos"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['peer_address']
self._child_container_classes = OrderedDict([("brief-pcep-information", ("brief_pcep_information", PcePeer.PeerInfos.PeerInfo.BriefPcepInformation))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('peer_address', YLeaf(YType.str, 'peer-address')),
('peer_address_xr', YLeaf(YType.str, 'peer-address-xr')),
('peer_protocol', YLeaf(YType.enumeration, 'peer-protocol')),
])
self.peer_address = None
self.peer_address_xr = None
self.peer_protocol = None
self.brief_pcep_information = PcePeer.PeerInfos.PeerInfo.BriefPcepInformation()
self.brief_pcep_information.parent = self
self._children_name_map["brief_pcep_information"] = "brief-pcep-information"
self._children_yang_names.add("brief-pcep-information")
self._segment_path = lambda: "peer-info"
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import (
Input, LSTM, Dense, Bidirectional,
Layer, Dropout, MultiHeadAttention, LayerNormalization,
Embedding, GlobalAveragePooling2D, GlobalAveragePooling1D
)
from tensorflow.keras import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
import matplotlib.pyplot as plt
from sklearn.metrics import (
accuracy_score, f1_score,
precision_score, recall_score,
roc_auc_score, roc_curve,
confusion_matrix
)
class LogisticRegression:
def __init__(self, seed, checkpoint_path):
self.seed = seed
self.checkpoint_path = checkpoint_path
self.model = None
self.training_hist = None
def build(self, nbr_time_steps, loss, learning_rate, metrics_list):
i = Input(shape=(nbr_time_steps,))
x = Dense(1, activation='sigmoid')(i)
self.model = Model(i, x)
self.model.compile(
loss=loss,
optimizer=SGD(learning_rate=learning_rate),
metrics=metrics_list
)
print(self.model.summary())
def load_saved_weights(self):
self.model.load_weights(self.checkpoint_path)
def train(self, X, Y, epochs):
# callback to monitor training and save weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=self.checkpoint_path,
save_weights_only=True,
verbose=1
)
# split time series into train/validation sets at a chosen time step
# here the data is simply split in half (first half train, later half validation)
nbr_samples = X.shape[0]
self.training_hist = self.model.fit(
X[:-nbr_samples // 2], Y[:-nbr_samples // 2],
epochs=epochs,
validation_data=(X[-nbr_samples // 2:], Y[-nbr_samples // 2:]),
callbacks=[cp_callback],
)
return self.training_hist
def plot_training_metrics(self):
if self.model is None or self.training_hist is None:
raise Exception("Must train the model before plotting.")
else:
# plot the training loss
plt.plot(self.training_hist.history['loss'], label='Training Loss')
plt.plot(self.training_hist.history['val_loss'], label='Validation Loss')
plt.title("Logistic Regression Loss by Epoch")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
# plot the training accuracy
plt.plot(self.training_hist.history['accuracy'], label='Training Accuracy')
plt.plot(self.training_hist.history['val_accuracy'], label='Validation Accuracy')
plt.title("Logistic Regression Classification Accuracy by Epoch")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
def make_predictions(self, X):
pred_probs = self.model.predict(X)
preds = np.where(pred_probs >= 0.5, 1, 0)
return pred_probs, preds
def evaluate(self, y_train, y_test, x_train=None, x_test=None, plot_roc=False):
train_pred_probs, train_preds = self.make_predictions(X=x_train)
test_pred_probs, test_preds = self.make_predictions(X=x_test)
# print evaluation
print("Logistic Regression Test Set Metrics")
print("Accuracy:", accuracy_score(y_test, test_preds))
print("F1 Score:", f1_score(y_test, test_preds))
print("Precision:", precision_score(y_test, test_preds))
print("Recall:", recall_score(y_test, test_preds))
print("ROC AUC:", roc_auc_score(y_test, test_preds))
print("Confusion Matrix Format: \n[TN, FP]\n[FN, TP]")
print(confusion_matrix(y_test, test_preds))
if plot_roc:
# plot the ROC curve
train_fpr, train_tpr, train_thresholds = roc_curve(y_train, train_pred_probs)
fpr, tpr, thresholds = roc_curve(y_test, test_pred_probs)
plt.plot(
train_fpr, train_tpr, color='darkorange',
label=f"Train ROC with AUC = {round(roc_auc_score(y_train, train_preds), 2)}"
)
plt.plot(
fpr, tpr, color='green',
label=f"Test ROC with AUC = {round(roc_auc_score(y_test, test_preds), 2)}"
)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Logistic Regression Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
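# --- Usage sketch (not part of the original module) ---------------------------------
# The helper below shows one way to drive the LogisticRegression wrapper above.
# The toy data, shapes and checkpoint path are illustrative assumptions.
def _example_logistic_regression_usage():
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 10)).astype("float32")      # 200 samples, 10 lagged inputs
    Y = (X.mean(axis=1) > 0).astype("int32")              # toy binary target
    clf = LogisticRegression(seed=0, checkpoint_path="./checkpoints/logreg.ckpt")
    clf.build(nbr_time_steps=10, loss="binary_crossentropy",
              learning_rate=0.01, metrics_list=["accuracy"])
    clf.train(X, Y, epochs=5)
    return clf.make_predictions(X)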
class DeepLSTM:
def __init__(self, seed, checkpoint_path):
self.seed = seed
self.checkpoint_path = checkpoint_path
self.model = None
self.training_hist = None
def build(self, nbr_time_steps, nbr_features, loss, learning_rate, metrics_list,
nbr_hidden_layers=1, hidden_layer_units=[10]):
i = Input(shape=(nbr_time_steps, nbr_features))
for layer in range(nbr_hidden_layers):
# every LSTM layer except the last must return sequences so the next layer receives 3-D input
return_sequences = layer < nbr_hidden_layers - 1
if layer == 0:
x = LSTM(hidden_layer_units[layer], return_sequences=return_sequences)(i)
else:
x = LSTM(hidden_layer_units[layer], return_sequences=return_sequences)(x)
x = Dense(1, activation='sigmoid')(x)
self.model = Model(i, x)
self.model.compile(
loss=loss,
optimizer=Adam(learning_rate=learning_rate),
metrics=metrics_list
)
print(self.model.summary())
def load_saved_weights(self):
self.model.load_weights(self.checkpoint_path)
def train(self, X, Y, epochs):
# callback to monitor training and save weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=self.checkpoint_path,
save_weights_only=True,
verbose=1
)
# split time series into train/validation sets at a chosen time step
# here the data is simply split in half (first half train, later half validation)
nbr_samples = X.shape[0]
self.training_hist = self.model.fit(
X[:-nbr_samples // 2], Y[:-nbr_samples // 2],
epochs=epochs,
validation_data=(X[-nbr_samples // 2:], Y[-nbr_samples // 2:]),
callbacks=[cp_callback],
)
return self.training_hist
def plot_training_metrics(self):
if self.model is None or self.training_hist is None:
raise Exception("Must train the model before plotting.")
else:
# plot the training loss
plt.plot(self.training_hist.history['loss'], label='Training Loss')
plt.plot(self.training_hist.history['val_loss'], label='Validation Loss')
plt.title("LSTM Loss by Epoch")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
# plot the training accuracy
plt.plot(self.training_hist.history['accuracy'], label='Training Accuracy')
plt.plot(self.training_hist.history['val_accuracy'], label='Validation Accuracy')
plt.title("LSTM Classification Accuracy by Epoch")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
def make_predictions(self, X):
pred_probs = self.model.predict(X)
preds = np.where(pred_probs >= 0.5, 1, 0)
return pred_probs, preds
def evaluate(self, y_train, y_test, x_train=None, x_test=None, plot_roc=False):
train_pred_probs, train_preds = self.make_predictions(X=x_train)
test_pred_probs, test_preds = self.make_predictions(X=x_test)
# print evaluation
print("LSTM Test Set Metrics")
print("Accuracy:", accuracy_score(y_test, test_preds))
print("F1 Score:", f1_score(y_test, test_preds))
print("Precision:", precision_score(y_test, test_preds))
print("Recall:", recall_score(y_test, test_preds))
print("ROC AUC:", roc_auc_score(y_test, test_preds))
print("Confusion Matrix Format: \n[TN, FP]\n[FN, TP]")
print(confusion_matrix(y_test, test_preds))
if plot_roc:
# plot the ROC curve
train_fpr, train_tpr, train_thresholds = roc_curve(y_train, train_pred_probs)
fpr, tpr, thresholds = roc_curve(y_test, test_pred_probs)
plt.plot(
train_fpr, train_tpr, color='darkorange',
label=f"Train ROC with AUC = {round(roc_auc_score(y_train, train_preds), 2)}"
)
plt.plot(
fpr, tpr, color='green',
label=f"Test ROC with AUC = {round(roc_auc_score(y_test, test_preds), 2)}"
)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('LSTM Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
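# --- Usage sketch (not part of the original module) ---------------------------------
# Unlike the logistic-regression wrapper, the LSTM expects 3-D input:
# (samples, time steps, features). Shapes and paths below are illustrative assumptions.
def _example_deep_lstm_usage():
    rng = np.random.default_rng(1)
    X = rng.normal(size=(200, 20, 4)).astype("float32")   # 200 windows, 20 steps, 4 features
    Y = (X[:, -1, 0] > 0).astype("int32")                 # toy label derived from the last step
    net = DeepLSTM(seed=1, checkpoint_path="./checkpoints/lstm.ckpt")
    net.build(nbr_time_steps=20, nbr_features=4, loss="binary_crossentropy",
              learning_rate=0.001, metrics_list=["accuracy"],
              nbr_hidden_layers=2, hidden_layer_units=[32, 16])
    net.train(X, Y, epochs=5)
    return net.make_predictions(X)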
class DeepBidirectionalLSTM:
def __init__(self, seed, checkpoint_path):
self.seed = seed
self.checkpoint_path = checkpoint_path
self.model = None
self.training_hist = None
def build(self, nbr_time_steps, nbr_features, loss, learning_rate, metrics_list,
nbr_hidden_layers=1, hidden_layer_units=[10]):
"""
Builds the LSTM architecture.
:param nbr_time_steps: Time steps in the sequence, or the number of words in a text sequence for NLP problems
:param nbr_features: Features, or the number of latent features/embedding dimensions for NLP problems
:param loss: Type of loss to optimize
:param learning_rate: Controls the size of the adjustments to the model's weights in each iteration
:param metrics_list: Any metrics to track while training (model will optimize loss, but track these too)
:param nbr_hidden_layers: How deep to make the network
:param hidden_layer_units: List of the number of units per layer, list length should match nbr_hidden_layers
"""
i = Input(shape=(nbr_time_steps, nbr_features))
for layer in range(nbr_hidden_layers):
# every layer except the last must return sequences so the next Bidirectional LSTM receives 3-D input
return_sequences = layer < nbr_hidden_layers - 1
if layer == 0:
x = Bidirectional(LSTM(hidden_layer_units[layer], return_sequences=return_sequences), merge_mode="concat")(i)
else:
x = Bidirectional(LSTM(hidden_layer_units[layer], return_sequences=return_sequences), merge_mode="concat")(x)
x = Dense(1, activation='sigmoid')(x)
self.model = Model(i, x)
self.model.compile(
loss=loss,
optimizer=Adam(learning_rate=learning_rate),
metrics=metrics_list
)
print(self.model.summary())
def load_saved_weights(self):
self.model.load_weights(self.checkpoint_path)
def train(self, X, Y, epochs):
# callback to monitor training and save weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=self.checkpoint_path,
save_weights_only=True,
verbose=1
)
# split time series into train/validation sets at a chosen time step
# here the data is simply split in half (first half train, later half validation)
nbr_samples = X.shape[0]
self.training_hist = self.model.fit(
X[:-nbr_samples // 2], Y[:-nbr_samples // 2],
epochs=epochs,
validation_data=(X[-nbr_samples // 2:], Y[-nbr_samples // 2:]),
callbacks=[cp_callback],
)
return self.training_hist
def plot_training_metrics(self):
if self.model is None or self.training_hist is None:
raise Exception("Must train the model before plotting.")
else:
# plot the training loss
plt.plot(self.training_hist.history['loss'], label='Training Loss')
plt.plot(self.training_hist.history['val_loss'], label='Validation Loss')
plt.title("Bidirectional LSTM Loss by Epoch")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
# plot the training accuracy
plt.plot(self.training_hist.history['accuracy'], label='Training Accuracy')
plt.plot(self.training_hist.history['val_accuracy'], label='Validation Accuracy')
plt.title("Bidirectional LSTM Classification Accuracy by Epoch")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
def make_predictions(self, X):
pred_probs = self.model.predict(X)
preds = np.where(pred_probs >= 0.5, 1, 0)
return pred_probs, preds
def evaluate(self, y_train, y_test, x_train=None, x_test=None, plot_roc=False):
train_pred_probs, train_preds = self.make_predictions(X=x_train)
test_pred_probs, test_preds = self.make_predictions(X=x_test)
# print evaluation
print("Bidirectional LSTM Test Set Metrics")
print("Accuracy:", accuracy_score(y_test, test_preds))
print("F1 Score:", f1_score(y_test, test_preds))
print("Precision:", precision_score(y_test, test_preds))
print("Recall:", recall_score(y_test, test_preds))
print("ROC AUC:", roc_auc_score(y_test, test_preds))
print("Confusion Matrix Format: \n[TN, FP]\n[FN, TP]")
print(confusion_matrix(y_test, test_preds))
if plot_roc:
# plot the ROC curve
train_fpr, train_tpr, train_thresholds = roc_curve(y_train, train_pred_probs)
fpr, tpr, thresholds = roc_curve(y_test, test_pred_probs)
plt.plot(
train_fpr, train_tpr, color='darkorange',
label=f"Train ROC with AUC = {round(roc_auc_score(y_train, train_preds), 2)}"
)
plt.plot(
fpr, tpr, color='green',
label=f"Test ROC with AUC = {round(roc_auc_score(y_test, test_preds), 2)}"
)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Bidirectional LSTM Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
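# --- Usage sketch (not part of the original module) ---------------------------------
# Building a two-layer bidirectional network; note that hidden_layer_units must contain
# exactly nbr_hidden_layers entries. All values are illustrative assumptions.
def _example_bidirectional_lstm_build():
    net = DeepBidirectionalLSTM(seed=2, checkpoint_path="./checkpoints/bilstm.ckpt")
    net.build(nbr_time_steps=15, nbr_features=3, loss="binary_crossentropy",
              learning_rate=0.001, metrics_list=["accuracy"],
              nbr_hidden_layers=2, hidden_layer_units=[16, 8])
    return net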
class TransformerBlock(Layer):
"""
Class borrowed from: https://keras.io/examples/nlp/text_classification_with_transformer/
Note: this is only the encoder block from a full transformer
"""
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super(TransformerBlock, self).__init__()
self.att = MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
self.ffn = Sequential(
[Dense(ff_dim, activation="relu"), Dense(embed_dim), ]
)
self.layernorm1 = LayerNormalization(epsilon=1e-6)
self.layernorm2 = LayerNormalization(epsilon=1e-6)
self.dropout1 = Dropout(rate)
self.dropout2 = Dropout(rate)
def call(self, inputs, training):
attn_output = self.att(inputs, inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)
class ValueAndPositionEmbedding(Layer):
"""
These 2 sources helped inspire this class:
https://keras.io/examples/nlp/text_classification_with_transformer/
https://keras.io/examples/vision/image_classification_with_vision_transformer/
"""
def __init__(self, sequence_len, embed_dim):
super(ValueAndPositionEmbedding, self).__init__()
self.val_input = Dense(units=embed_dim) # dense layer replaces token embedding from language model
self.pos_emb = Embedding(input_dim=sequence_len, output_dim=embed_dim)
def call(self, x):
# x is expected to be (batch, sequence_len, nbr_features); positions must index the
# sequence (time) axis, so use the second-to-last dimension rather than the feature dimension
sequence_len = tf.shape(x)[-2]
positions = tf.range(start=0, limit=sequence_len, delta=1)
positions = self.pos_emb(positions)
x = self.val_input(x)
return x + positions
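# --- Usage sketch (not part of the original module) ---------------------------------
# How the two custom layers compose: a (batch, sequence_len, nbr_features) tensor is
# projected into embed_dim, augmented with positional embeddings, passed through the
# encoder block, then pooled for binary classification. All sizes are assumptions.
def _example_transformer_encoder_model(sequence_len=20, nbr_features=4, embed_dim=32):
    inputs = Input(shape=(sequence_len, nbr_features))
    x = ValueAndPositionEmbedding(sequence_len, embed_dim)(inputs)
    x = TransformerBlock(embed_dim=embed_dim, num_heads=2, ff_dim=64)(x)
    x = GlobalAveragePooling1D()(x)
    outputs = Dense(1, activation="sigmoid")(x)
    return Model(inputs, outputs)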
class DeepTransformer:
def __init__(self, seed, checkpoint_path):
self.seed = seed
self.checkpoint_path = checkpoint_path
self.model = None
self.training_hist = None
def build(self, max_seq_len, nbr_features, embed_dim, loss, learning_rate, metrics_list,
nbr_transformer_blocks=1, nbr_attention_heads_each_block=2, nbr_dense_units_each_block=32):
"""
Builds the transformer architecture. For advice in choosing the number of attention heads, see:
https://blog.ml.cmu.edu/2020/03/20/are-sixteen-heads-really-better-than-one/
Spoiler: more heads 'can' help with training, but you are likely better off using as
"""
#Only can do this if the interactome is big enough
if len(PCSFInputObj.undirEdges) + len(PCSFInputObj.dirEdges) < 50:
sys.exit("Cannot use --randomTerminals with such a small interactome.")
#Make a new PCSFInput object that contains all the same values as the original but empty prizes
newPCSFInputObj = copy.deepcopy(PCSFInputObj)
newPCSFInputObj.origPrizes = {'':0}
#degrees is a sorted list that will hold outdegree of every node in interactome
degrees = []
if len(PCSFInputObj.undirEdges) > 0 and len(PCSFInputObj.dirEdges) > 0:
for node in PCSFInputObj.undirEdges:
try:
degrees.append((node,len(PCSFInputObj.undirEdges[node])+ \
len(PCSFInputObj.dirEdges[node])))
except KeyError:
degrees.append((node,len(PCSFInputObj.undirEdges[node])))
for node in PCSFInputObj.dirEdges:
if node not in PCSFInputObj.undirEdges:
degrees.append((node,len(PCSFInputObj.dirEdges[node])))
else:
for node in PCSFInputObj.undirEdges:
degrees.append((node,len(PCSFInputObj.undirEdges[node])))
for node in PCSFInputObj.dirEdges:
degrees.append((node,len(PCSFInputObj.dirEdges[node])))
degrees.sort(key=itemgetter(1))
#Find index of current terminal in degrees list
for k,terminal in enumerate(PCSFInputObj.origPrizes):
for i,value in enumerate(degrees):
if terminal == value[0]:
index = i
break
#Choose an index offset to select new terminal (distance from orig terminal in degrees list)
#Make sure newly chosen terminal is not already chosen on a previous round
newTerm = ''
i = -1
while newTerm in newPCSFInputObj.origPrizes and i<=10000:
i+=1
if seed != None:
random.seed(seed+k+i)
offset = int(random.gauss(0.0,100.0))
newIndex = index + offset
#if offset causes the index to wraparound to the other side of the list, try again
if newIndex<0: continue
try:
newNode = degrees[newIndex]
except IndexError:
#if offset points outside list, try loop again
continue
#To make truly random, need to choose randomly between all nodes with the same degree
#Otherwise, ordering of dict iteration matters
nodesWithSameDegree = []
for node in degrees[newIndex:]:
if node[1] == newNode[1]:
nodesWithSameDegree.append(node)
else:
break
for node in degrees[newIndex-1::-1]:
if node[1] == newNode[1]:
nodesWithSameDegree.append(node)
else:
break
newTerm = random.choice(nodesWithSameDegree)[0]
#if we've tried 10000 times, throw error to avoid infinite loop
if newTerm in newPCSFInputObj.origPrizes:
sys.exit('There was a problem with --randomTerminals. Aborting.')
#Assign prize to newly chosen terminal
newPCSFInputObj.origPrizes[newTerm] = PCSFInputObj.origPrizes[terminal]
del newPCSFInputObj.origPrizes['']
newPCSFInputObj.assignNegPrizes(newPCSFInputObj.musquared,excludeT)
print 'New degree-matched terminals have been chosen.\n'
return newPCSFInputObj
#wrapper function for runPCSF() multiprocessing
def PCSF(inputObj,msgpath,seed):
(Edge, Info) = inputObj.runPCSF(msgpath,seed)
return (Edge, Info)
def changeValuesAndMergeResults(func, seed, inputObj, numRuns, msgpath, outputpath, outputlabel,
excludeT):
"""
Changes the prizes/edges in the PCSFInput object according to func and runs the msgsteiner
algorithm, then merges the results together with the given PCSFOutput object. Writes
cytoscape files for final merged results.
INPUT: func - the function which takes inputObj and changes the prize/edge values
(i.e. shuffles or adds noise)
inputObj - a PCSFInput object with original values, to be changed.
numRuns - the number of times to change the values and re-run msgsteiner
msgpath - path to the directory where msgsteiner is kept
outputpath - path to the directory where output files should be stored
outputlabel - a label with which to name all of the output files for this run
OUTPUT: <outputlabel>_changed_#_info.txt - a text file FOR EACH RUN containing the
contents of stderr for all msgsteiner runs
RETURNS: merged - the PCSFOutput object that is a result of all the merges
"""
print 'Preparing to change values %i times and get merged results of running the '\
'algorithm on new values.\n' %numRuns
#Create multiprocessing Pool
if inputObj.processes == None:
pool = mp.Pool()
else:
pool = mp.Pool(inputObj.processes)
if seed != None:
#For each run, create process, change prize/edge values and run msgsteiner
#Note that each run will create a info file
results = [pool.apply_async(PCSF, args=(func(inputObj, seed+i, excludeT),
msgpath, seed+i,)) for i in xrange(numRuns)]
else:
results = [pool.apply_async(PCSF, args=(func(inputObj, seed, excludeT),
msgpath,seed,)) for i in xrange(numRuns)]
output = [p.get() for p in results]
i = 0
#Merge output of new msgsteiner runs together
while i <= numRuns-1:
(newEdgeList, newInfo) = output[i]
#By creating the output object with inputObj instead of changedInputObj,
#the prizes stored in the networkx graphs will be the ORIGINAL CORRECT prizes,
#not the changed prizes.
if str(func)[10:23] == 'shufflePrizes':
changedOutputObj = PCSFOutput(inputObj, newEdgeList, newInfo, outputpath,
outputlabel+'_shuffledPrizes_%i'%i, 0)
elif str(func)[10:20] == 'noiseEdges':
changedOutputObj = PCSFOutput(inputObj, newEdgeList, newInfo, outputpath,
outputlabel+'_noisyEdges_%i'%i, 0)
elif str(func)[10:25] == 'randomTerminals':
changedOutputObj = PCSFOutput(inputObj, newEdgeList, newInfo, outputpath,
outputlabel+'_randomTerminals_%i'%i, 0)
if i == 0:
#first run
merged = changedOutputObj
elif i == numRuns-1:
#last run, merge results and calculate betweenness
merged = mergeOutputs(merged, changedOutputObj, 1, i, 1)
else:
#Merge results of runs with the merged object containing all results so far
merged = mergeOutputs(merged, changedOutputObj, 0, i, 1)
i += 1
#return merged outputobj
return merged
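#Hypothetical invocation sketch (not part of the original script; the PCSFInput object,
#msgsteiner path and output locations are placeholders). noiseEdges is one of the
#value-changing functions this wrapper expects, as referenced in the checks above:
#
# merged = changeValuesAndMergeResults(noiseEdges, seed=1, inputObj=pcsfInputObj,
#                                      numRuns=10, msgpath='./msgsteiner',
#                                      outputpath='./output', outputlabel='run1',
#                                      excludeT=0)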
def crossValidation(k, rep, PCSFInputObj, seed, msgpath, outputpath, outputlabel):
"""
Separates prizes into k "folds" and, in turn, holds each fold out of the analysis. Reports what
fraction of held-out prize nodes were returned as Steiner nodes.
INPUT: k - the number of "folds" to separate the prize nodes into.
rep - Repetition of k-fold cv calculation we are currently on
PCSFInputObj - a PCSF object with all prize nodes
seed - number to give to the random number generator
msgpath - path to msgsteiner code
outputpath - path to the directory where output files should be stored
outputlabel - a label with which to name all of the output files for this run
OUTPUTS: File <outputlabel>_cvResults_<rep>.txt containing stats from the cv run
Files showing steiners and terminals for each of the intermediate solutions
"""
print 'Running %i-fold cross validation (rep %i).\n'%(k,rep)
prizes = PCSFInputObj.origPrizes.keys()
if seed != None:
random.seed(seed+rep)
else: random.seed(None)
random.shuffle(prizes)
iterations = ''
#Do k iterations
for i in range(0,k):
#File to store steiner and terminal nodes in results
outputs = open('%s/%s_cvIntermediate_rep%ik%i.txt'%(outputpath,outputlabel,rep,i), 'wb')
#select random prizes to hold out of this round
hold_out = prizes[i:len(prizes):k]
#keep track of these prizes which are returned in optimal network
recovered = []
#keep track of steiner nodes
steiners = []
#keep track of chosen terminals
terminals = []
newPCSFInputObj = copy.deepcopy(PCSFInputObj)
for p in hold_out:
#Remove held out original prize and update total prize to reflect only negPrize
del newPCSFInputObj.origPrizes[p]
newPCSFInputObj.totalPrizes[p] = newPCSFInputObj.negPrizes[p]
(newEdgeList, newInfo) = newPCSFInputObj.runPCSF(msgpath, seed)
#See if held out proteins appear in newEdgeList
edges = newEdgeList.split('\n')
for edge in edges:
words = edge.split()
if len(words) > 0:
for node in (words[0], words[1]):
if node in hold_out:
if node not in recovered:
recovered.append(node)
steiners.append(node)
elif node not in prizes:
if node != 'DUMMY' and node not in steiners:
steiners.append(node)
else:
if node not in terminals:
terminals.append(node)
#Write out lists for this fold's results
outputs.write('Recovered Terminals\n')
outputs.write(str(recovered))
outputs.write('\nAll Steiner Nodes\n')
outputs.write(str(steiners))
outputs.write('\nTerminals\n')
outputs.write(str(terminals))
outputs.close()
#Return num of held-out terminals, num of recovered hold-outs, total num of Steiner nodes
numRecovered = len(recovered)
numSteiners = len(steiners)
iterations = iterations + (str(i+1)+ '\t' + str(len(hold_out))+ '\t' + str(numRecovered)+\
'\t' + str(numSteiners) + '\n')
results = open('%s/%s_cvResults_%i.txt'%(outputpath,outputlabel,rep), 'wb')
results.write('Iteration\tNum of held-out terminals\tNum of recovered terminals\t'\
'Total num of Steiner nodes\n')
results.write(iterations)
results.close()
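#Hypothetical invocation sketch (not part of the original script; paths and the
#PCSFInput object are placeholders). Each repetition writes its own results file:
#
# for rep in range(3):
#     crossValidation(10, rep, pcsfInputObj, 1, './msgsteiner', './output', 'run1')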
def main():
#Parsing arguments (run python PCSF.py -h to see all these descriptions)
parser = argparse.ArgumentParser(description='Find multiple pathways within an interactome '\
'that are altered in a particular condition using the Prize Collecting Steiner Forest '\
'problem')
#required arguments
parser.add_argument("-p", "--prize", dest='prizeFile', help='(Required) Path to the text file '\
'containing the prizes. Should be a tab delimited file with lines: "ProteinName'\
'\tPrizeValue"')
parser.add_argument("-e", "--edge", dest='edgeFile', help ='(Required) Path to the text file '\
'containing the interactome edges. Should be a tab delimited file with 3 or 4 columns: '\
'"ProteinA\tProteinB\tWeight(between 0 and 1)\tDirectionality(U or D, optional)"')
#optional arguments
parser.add_argument("-c", "--conf", dest='confFile', help='Path to the text file containing '\
'the parameters. Should be several lines that looks like: "ParameterName = '\
'ParameterValue". Must contain values for w, b, D. May contain values for optional '\
'parameters mu, garnetBeta, noise, r, g. Default = "./conf.txt"', default='conf.txt')
parser.add_argument("-d","--dummyMode", dest='dummyMode', help='Tells the program which nodes '\
'in the interactome to connect the dummy node to. "terminals"= connect to all terminals, '\
'"others"= connect to all nodes except for terminals, "all"= connect to all '\
'nodes in the interactome. If you wish to supply your
5]
self.initialized = True
def setup_pieces_pawn(self, p_data: int, p_tb_size: int, f: int) -> None:
j = 1 + int(self.pawns[1] > 0)
order = self.data[p_data] & 0x0f
order2 = self.data[p_data + 1] & 0x0f if self.pawns[1] else 0x0f
self.files[f].pieces[0] = [self.data[p_data + i + j] & 0x0f for i in range(self.num)]
self.files[f].norm[0] = [0 for _ in range(self.num)]
self.set_norm_pawn(self.files[f].norm[0], self.files[f].pieces[0])
self.files[f].factor[0] = [0 for _ in range(TBPIECES)]
self.tb_size[p_tb_size] = self.calc_factors_pawn(self.files[f].factor[0], order, order2, self.files[f].norm[0], f)
order = self.data[p_data] >> 4
order2 = self.data[p_data + 1] >> 4 if self.pawns[1] else 0x0f
self.files[f].pieces[1] = [self.data[p_data + i + j] >> 4 for i in range(self.num)]
self.files[f].norm[1] = [0 for _ in range(self.num)]
self.set_norm_pawn(self.files[f].norm[1], self.files[f].pieces[1])
self.files[f].factor[1] = [0 for _ in range(TBPIECES)]
self.tb_size[p_tb_size + 1] = self.calc_factors_pawn(self.files[f].factor[1], order, order2, self.files[f].norm[1], f)
def setup_pieces_piece(self, p_data: int) -> None:
self.pieces[0] = [self.data[p_data + i + 1] & 0x0f for i in range(self.num)]
order = self.data[p_data] & 0x0f
self.set_norm_piece(self.norm[0], self.pieces[0])
self.tb_size[0] = self.calc_factors_piece(self.factor[0], order, self.norm[0])
self.pieces[1] = [self.data[p_data + i + 1] >> 4 for i in range(self.num)]
order = self.data[p_data] >> 4
self.set_norm_piece(self.norm[1], self.pieces[1])
self.tb_size[1] = self.calc_factors_piece(self.factor[1], order, self.norm[1])
def probe_wdl_table(self, board: chess.Board) -> int:
try:
with self.read_condition:
self.read_count += 1
return self._probe_wdl_table(board)
finally:
with self.read_condition:
self.read_count -= 1
self.read_condition.notify()
def _probe_wdl_table(self, board: chess.Board) -> int:
self.init_table_wdl()
key = calc_key(board)
if not self.symmetric:
if key != self.key:
cmirror = 8
mirror = 0x38
bside = int(board.turn == chess.WHITE)
else:
cmirror = mirror = 0
bside = int(board.turn != chess.WHITE)
else:
cmirror = 0 if board.turn == chess.WHITE else 8
mirror = 0 if board.turn == chess.WHITE else 0x38
bside = 0
if not self.has_pawns:
p = [0 for _ in range(TBPIECES)]
i = 0
while i < self.num:
piece_type = self.pieces[bside][i] & 0x07
color = (self.pieces[bside][i] ^ cmirror) >> 3
bb = board.pieces_mask(piece_type, chess.WHITE if color == 0 else chess.BLACK)
for square in chess.scan_forward(bb):
p[i] = square
i += 1
idx = self.encode_piece(self.norm[bside], p, self.factor[bside])
res = self.decompress_pairs(self.precomp[bside], idx)
else:
p = [0 for _ in range(TBPIECES)]
i = 0
k = self.files[0].pieces[0][0] ^ cmirror
color = k >> 3
piece_type = k & 0x07
bb = board.pieces_mask(piece_type, chess.WHITE if color == 0 else chess.BLACK)
for square in chess.scan_forward(bb):
p[i] = square ^ mirror
i += 1
f = self.pawn_file(p)
pc = self.files[f].pieces[bside]
while i < self.num:
color = (pc[i] ^ cmirror) >> 3
piece_type = pc[i] & 0x07
bb = board.pieces_mask(piece_type, chess.WHITE if color == 0 else chess.BLACK)
for square in chess.scan_forward(bb):
p[i] = square ^ mirror
i += 1
idx = self.encode_pawn(self.files[f].norm[bside], p, self.files[f].factor[bside])
res = self.decompress_pairs(self.files[f].precomp[bside], idx)
return res - 2
class DtzTable(Table):
def init_table_dtz(self) -> None:
with self.write_lock:
self.init_mmap()
if self.initialized:
return
self.check_magic(self.variant.tbz_magic, self.variant.pawnless_tbz_magic)
self.factor = [0 for _ in range(TBPIECES)]
self.norm = [0 for _ in range(self.num)]
self.tb_size = [0, 0, 0, 0]
self.size = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.files = [PawnFileDataDtz() for f in range(4)]
files = 4 if self.data[4] & 0x02 else 1
p_data = 5
if not self.has_pawns:
self.map_idx = [0, 0, 0, 0]
self.setup_pieces_piece_dtz(p_data, 0)
p_data += self.num + 1
p_data += p_data & 0x01
self.precomp = self.setup_pairs(p_data, self.tb_size[0], 0, False)
self.flags = self._flags
p_data = self._next
self.p_map = p_data
if self.flags & 2:
if not self.flags & 16:
for i in range(4):
self.map_idx[i] = p_data + 1 - self.p_map
p_data += 1 + self.data[p_data]
else:
for i in range(4):
self.map_idx[i] = (p_data + 2 - self.p_map) // 2
p_data += 2 + 2 * self.read_uint16(p_data)
p_data += p_data & 0x01
self.precomp.indextable = p_data
p_data += self.size[0]
self.precomp.sizetable = p_data
p_data += self.size[1]
p_data = (p_data + 0x3f) & ~0x3f
self.precomp.data = p_data
p_data += self.size[2]
self.key = recalc_key(self.pieces)
self.mirrored_key = recalc_key(self.pieces, mirror=True)
else:
s = 1 + int(self.pawns[1] > 0)
for f in range(4):
self.setup_pieces_pawn_dtz(p_data, f, f)
p_data += self.num + s
p_data += p_data & 0x01
self.flags = []
for f in range(files):
self.files[f].precomp = self.setup_pairs(p_data, self.tb_size[f], 3 * f, False)
p_data = self._next
self.flags.append(self._flags)
self.map_idx = []
self.p_map = p_data
for f in range(files):
self.map_idx.append([])
if self.flags[f] & 2:
if not self.flags[f] & 16:
for _ in range(4):
self.map_idx[-1].append(p_data + 1 - self.p_map)
p_data += 1 + self.data[p_data]
else:
p_data += p_data & 0x01
for _ in range(4):
self.map_idx[-1].append((p_data + 2 - self.p_map) // 2)
p_data += 2 + 2 * self.read_uint16(p_data)
p_data += p_data & 0x01
for f in range(files):
self.files[f].precomp.indextable = p_data
p_data += self.size[3 * f]
for f in range(files):
self.files[f].precomp.sizetable = p_data
p_data += self.size[3 * f + 1]
for f in range(files):
p_data = (p_data + 0x3f) & ~0x3f
self.files[f].precomp.data = p_data
p_data += self.size[3 * f + 2]
self.initialized = True
def probe_dtz_table(self, board: chess.Board, wdl: int) -> Tuple[int, int]:
try:
with self.read_condition:
self.read_count += 1
return self._probe_dtz_table(board, wdl)
finally:
with self.read_condition:
self.read_count -= 1
self.read_condition.notify()
def _probe_dtz_table(self, board: chess.Board, wdl: int) -> Tuple[int, int]:
self.init_table_dtz()
key = calc_key(board)
if not self.symmetric:
if key != self.key:
cmirror = 8
mirror = 0x38
bside = int(board.turn == chess.WHITE)
else:
cmirror = mirror = 0
bside = int(board.turn != chess.WHITE)
else:
cmirror = 0 if board.turn == chess.WHITE else 8
mirror = 0 if board.turn == chess.WHITE else 0x38
bside = 0
if not self.has_pawns:
if (self.flags & 1) != bside and not self.symmetric:
return 0, -1
pc = self.pieces
p = [0 for _ in range(TBPIECES)]
i = 0
while i < self.num:
piece_type = pc[i] & 0x07
color = (pc[i] ^ cmirror) >> 3
bb = board.pieces_mask(piece_type, chess.WHITE if color == 0 else chess.BLACK)
for square in chess.scan_forward(bb):
p[i] = square
i += 1
idx = self.encode_piece(self.norm, p, self.factor)
res = self.decompress_pairs(self.precomp, idx)
if self.flags & 2:
if not self.flags & 16:
res = self.data[self.p_map + self.map_idx[WDL_TO_MAP[wdl + 2]] + res]
else:
res = self.read_uint16(self.p_map + 2 * (self.map_idx[WDL_TO_MAP[wdl + 2]] + res))
if (not (self.flags & PA_FLAGS[wdl + 2])) or (wdl & 1):
res *= 2
else:
k = self.files[0].pieces[0] ^ cmirror
piece_type = k & 0x07
color = k >> 3
bb = board.pieces_mask(piece_type, chess.WHITE if color == 0 else chess.BLACK)
i = 0
p = [0 for _ in range(TBPIECES)]
for square in chess.scan_forward(bb):
p[i] = square ^ mirror
i += 1
f = self.pawn_file(p)
if self.flags[f] & 1 != bside:
return 0, -1
pc = self.files[f].pieces
while i < self.num:
piece_type = pc[i] & 0x07
color = (pc[i] ^ cmirror) >> 3
bb = board.pieces_mask(piece_type, chess.WHITE if color == 0 else chess.BLACK)
for square in chess.scan_forward(bb):
p[i] = square ^ mirror
i += 1
idx = self.encode_pawn(self.files[f].norm, p, self.files[f].factor)
res = self.decompress_pairs(self.files[f].precomp, idx)
if self.flags[f] & 2:
if not self.flags[f] & 16:
res = self.data[self.p_map + self.map_idx[f][WDL_TO_MAP[wdl + 2]] + res]
else:
res = self.read_uint16(self.p_map + 2 * (self.map_idx[f][WDL_TO_MAP[wdl + 2]] + res))
if (not (self.flags[f] & PA_FLAGS[wdl + 2])) or (wdl & 1):
res *= 2
return res, 1
def setup_pieces_piece_dtz(self, p_data: int, p_tb_size: int) -> None:
self.pieces = [self.data[p_data + i + 1] & 0x0f for i in range(self.num)]
order = self.data[p_data] & 0x0f
self.set_norm_piece(self.norm, self.pieces)
self.tb_size[p_tb_size] = self.calc_factors_piece(self.factor, order, self.norm)
def setup_pieces_pawn_dtz(self, p_data: int, p_tb_size: int, f: int) -> None:
j = 1 + int(self.pawns[1] > 0)
order = self.data[p_data] & 0x0f
order2 = self.data[p_data + 1] & 0x0f if self.pawns[1] else 0x0f
self.files[f].pieces = [self.data[p_data + i + j] & 0x0f for i in range(self.num)]
self.files[f].norm = [0 for _ in range(self.num)]
self.set_norm_pawn(self.files[f].norm, self.files[f].pieces)
self.files[f].factor = [0 for _ in range(TBPIECES)]
self.tb_size[p_tb_size] = self.calc_factors_pawn(self.files[f].factor, order, order2, self.files[f].norm, f)
class Tablebase:
"""
Manages a collection of tablebase files for probing.
If *max_fds* is not ``None``, will at most use *max_fds* open file
descriptors at any given time. The least recently used tables are closed,
if necessary.
"""
def __init__(self, *, max_fds: Optional[int] = 128, VariantBoard:
"""Generated client library for toolresults version v1beta3."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.toolresults.v1beta3 import toolresults_v1beta3_messages as messages
class ToolresultsV1beta3(base_api.BaseApiClient):
"""Generated client library for service toolresults version v1beta3."""
MESSAGES_MODULE = messages
BASE_URL = u'https://www.googleapis.com/toolresults/v1beta3/'
_PACKAGE = u'toolresults'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform']
_VERSION = u'v1beta3'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'ToolresultsV1beta3'
_URL_VERSION = u'v1beta3'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new toolresults handle."""
url = url or self.BASE_URL
super(ToolresultsV1beta3, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_histories_executions_clusters = self.ProjectsHistoriesExecutionsClustersService(self)
self.projects_histories_executions_steps_perfMetricsSummary = self.ProjectsHistoriesExecutionsStepsPerfMetricsSummaryService(self)
self.projects_histories_executions_steps_perfSampleSeries_samples = self.ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService(self)
self.projects_histories_executions_steps_perfSampleSeries = self.ProjectsHistoriesExecutionsStepsPerfSampleSeriesService(self)
self.projects_histories_executions_steps_testCases = self.ProjectsHistoriesExecutionsStepsTestCasesService(self)
self.projects_histories_executions_steps_thumbnails = self.ProjectsHistoriesExecutionsStepsThumbnailsService(self)
self.projects_histories_executions_steps = self.ProjectsHistoriesExecutionsStepsService(self)
self.projects_histories_executions = self.ProjectsHistoriesExecutionsService(self)
self.projects_histories = self.ProjectsHistoriesService(self)
self.projects = self.ProjectsService(self)
class ProjectsHistoriesExecutionsClustersService(base_api.BaseApiService):
"""Service class for the projects_histories_executions_clusters resource."""
_NAME = u'projects_histories_executions_clusters'
def __init__(self, client):
super(ToolresultsV1beta3.ProjectsHistoriesExecutionsClustersService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Retrieves a single screenshot cluster by its ID.
Args:
request: (ToolresultsProjectsHistoriesExecutionsClustersGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ScreenshotCluster) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'toolresults.projects.histories.executions.clusters.get',
ordered_params=[u'projectId', u'historyId', u'executionId', u'clusterId'],
path_params=[u'clusterId', u'executionId', u'historyId', u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/histories/{historyId}/executions/{executionId}/clusters/{clusterId}',
request_field='',
request_type_name=u'ToolresultsProjectsHistoriesExecutionsClustersGetRequest',
response_type_name=u'ScreenshotCluster',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists Screenshot Clusters.
Returns the list of screenshot clusters corresponding to an execution. Screenshot clusters are created after the execution is finished. Clusters are created from a set of screenshots. Between any two screenshots, a matching score is calculated based off their metadata that determines how similar they are. Screenshots are placed in the cluster that has screens which have the highest matching scores.
Args:
request: (ToolresultsProjectsHistoriesExecutionsClustersListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListScreenshotClustersResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'toolresults.projects.histories.executions.clusters.list',
ordered_params=[u'projectId', u'historyId', u'executionId'],
path_params=[u'executionId', u'historyId', u'projectId'],
query_params=[],
relative_path=u'projects/{projectId}/histories/{historyId}/executions/{executionId}/clusters',
request_field='',
request_type_name=u'ToolresultsProjectsHistoriesExecutionsClustersListRequest',
response_type_name=u'ListScreenshotClustersResponse',
supports_download=False,
)
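# Illustrative call pattern for this generated client (identifiers are placeholders;
# request/response types come from the generated messages module imported above):
#
#   client = ToolresultsV1beta3()
#   request = messages.ToolresultsProjectsHistoriesExecutionsClustersGetRequest(
#       projectId='my-project', historyId='bh.1', executionId='1234', clusterId='42')
#   cluster = client.projects_histories_executions_clusters.Get(request)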
class ProjectsHistoriesExecutionsStepsPerfMetricsSummaryService(base_api.BaseApiService):
"""Service class for the projects_histories_executions_steps_perfMetricsSummary resource."""
_NAME = u'projects_histories_executions_steps_perfMetricsSummary'
def __init__(self, client):
super(ToolresultsV1beta3.ProjectsHistoriesExecutionsStepsPerfMetricsSummaryService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a PerfMetricsSummary resource. Returns the existing one if it has already been created.
May return any of the following error code(s): - NOT_FOUND - The containing Step does not exist
Args:
request: (PerfMetricsSummary) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(PerfMetricsSummary) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'toolresults.projects.histories.executions.steps.perfMetricsSummary.create',
ordered_params=[u'projectId', u'historyId', u'executionId', u'stepId'],
path_params=[u'executionId', u'historyId', u'projectId', u'stepId'],
query_params=[],
relative_path=u'projects/{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfMetricsSummary',
request_field='<request>',
request_type_name=u'PerfMetricsSummary',
response_type_name=u'PerfMetricsSummary',
supports_download=False,
)
class ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService(base_api.BaseApiService):
"""Service class for the projects_histories_executions_steps_perfSampleSeries_samples resource."""
_NAME = u'projects_histories_executions_steps_perfSampleSeries_samples'
def __init__(self, client):
super(ToolresultsV1beta3.ProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesService, self).__init__(client)
self._upload_configs = {
}
def BatchCreate(self, request, global_params=None):
r"""Creates a batch of PerfSamples - a client can submit multiple batches of Perf Samples through repeated calls to this method in order to split up a large request payload - duplicates and existing timestamp entries will be ignored. - the batch operation may partially succeed - the set of elements successfully inserted is returned in the response (omits items which already existed in the database).
May return any of the following canonical error codes: - NOT_FOUND - The containing PerfSampleSeries does not exist
Args:
request: (ToolresultsProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BatchCreatePerfSamplesResponse) The response message.
"""
config = self.GetMethodConfig('BatchCreate')
return self._RunMethod(
config, request, global_params=global_params)
BatchCreate.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'toolresults.projects.histories.executions.steps.perfSampleSeries.samples.batchCreate',
ordered_params=[u'projectId', u'historyId', u'executionId', u'stepId', u'sampleSeriesId'],
path_params=[u'executionId', u'historyId', u'projectId', u'sampleSeriesId', u'stepId'],
query_params=[],
relative_path=u'projects/{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}/samples:batchCreate',
request_field=u'batchCreatePerfSamplesRequest',
request_type_name=u'ToolresultsProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesBatchCreateRequest',
response_type_name=u'BatchCreatePerfSamplesResponse',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists the Performance Samples of a given Sample Series - The list results are sorted by timestamps ascending - The default page size is 500 samples; and maximum size allowed 5000 - The response token indicates the last returned PerfSample timestamp - When the results size exceeds the page size, submit a subsequent request including the page token to return the rest of the samples up to the page limit.
May return any of the following canonical error codes: - OUT_OF_RANGE - The specified request page_token is out of valid range - NOT_FOUND - The containing PerfSampleSeries does not exist
Args:
request: (ToolresultsProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListPerfSamplesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'toolresults.projects.histories.executions.steps.perfSampleSeries.samples.list',
ordered_params=[u'projectId', u'historyId', u'executionId', u'stepId', u'sampleSeriesId'],
path_params=[u'executionId', u'historyId', u'projectId', u'sampleSeriesId', u'stepId'],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'projects/{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}/samples',
request_field='',
request_type_name=u'ToolresultsProjectsHistoriesExecutionsStepsPerfSampleSeriesSamplesListRequest',
response_type_name=u'ListPerfSamplesResponse',
supports_download=False,
)
class ProjectsHistoriesExecutionsStepsPerfSampleSeriesService(base_api.BaseApiService):
"""Service class for the projects_histories_executions_steps_perfSampleSeries resource."""
_NAME = u'projects_histories_executions_steps_perfSampleSeries'
def __init__(self, client):
super(ToolresultsV1beta3.ProjectsHistoriesExecutionsStepsPerfSampleSeriesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a PerfSampleSeries.
May return any of the following error code(s): - ALREADY_EXISTS - PerfMetricSummary already exists for the given Step - NOT_FOUND - The containing Step does not exist
Args:
request: (PerfSampleSeries) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(PerfSampleSeries) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'toolresults.projects.histories.executions.steps.perfSampleSeries.create',
ordered_params=[u'projectId', u'historyId', u'executionId', u'stepId'],
path_params=[u'executionId', u'historyId', u'projectId', u'stepId'],
query_params=[],
relative_path=u'projects/{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries',
request_field='<request>',
request_type_name=u'PerfSampleSeries',
response_type_name=u'PerfSampleSeries',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets a PerfSampleSeries.
May return any of the following error code(s): - NOT_FOUND - The specified PerfSampleSeries does not exist
Args:
request: (ToolresultsProjectsHistoriesExecutionsStepsPerfSampleSeriesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(PerfSampleSeries) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'toolresults.projects.histories.executions.steps.perfSampleSeries.get',
ordered_params=[u'projectId', u'historyId', u'executionId', u'stepId', u'sampleSeriesId'],
path_params=[u'executionId', u'historyId', u'projectId', u'sampleSeriesId', u'stepId'],
query_params=[],
relative_path=u'projects/{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries/{sampleSeriesId}',
request_field='',
request_type_name=u'ToolresultsProjectsHistoriesExecutionsStepsPerfSampleSeriesGetRequest',
response_type_name=u'PerfSampleSeries',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists PerfSampleSeries for a given Step.
The request provides an optional filter which specifies one or more PerfMetricsType to include in the result; if none returns all. The resulting PerfSampleSeries are sorted by ids.
May return any of the following canonical error codes: - NOT_FOUND - The containing Step does not exist
Args:
request: (ToolresultsProjectsHistoriesExecutionsStepsPerfSampleSeriesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListPerfSampleSeriesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'toolresults.projects.histories.executions.steps.perfSampleSeries.list',
ordered_params=[u'projectId', u'historyId', u'executionId', u'stepId'],
path_params=[u'executionId', u'historyId', u'projectId', u'stepId'],
query_params=[u'filter'],
relative_path=u'projects/{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/perfSampleSeries',
request_field='',
request_type_name=u'ToolresultsProjectsHistoriesExecutionsStepsPerfSampleSeriesListRequest',
response_type_name=u'ListPerfSampleSeriesResponse',
supports_download=False,
)
class ProjectsHistoriesExecutionsStepsTestCasesService(base_api.BaseApiService):
"""Service class for the projects_histories_executions_steps_testCases resource."""
_NAME = u'projects_histories_executions_steps_testCases'
def __init__(self, client):
super(ToolresultsV1beta3.ProjectsHistoriesExecutionsStepsTestCasesService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets details of a Test Case for a Step. Experimental test cases API. Still in active development.
May return any of the following canonical error codes:
- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the containing Test Case does not exist
Args:
request: (ToolresultsProjectsHistoriesExecutionsStepsTestCasesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestCase) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'toolresults.projects.histories.executions.steps.testCases.get',
ordered_params=[u'projectId', u'historyId', u'executionId', u'stepId', u'testCaseId'],
path_params=[u'executionId', u'historyId', u'projectId', u'stepId', u'testCaseId'],
query_params=[],
relative_path=u'projects/{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/testCases/{testCaseId}',
request_field='',
request_type_name=u'ToolresultsProjectsHistoriesExecutionsStepsTestCasesGetRequest',
response_type_name=u'TestCase',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists Test Cases attached to a Step. Experimental test cases API. Still in active development.
May return any of the following canonical error codes:
- PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed - NOT_FOUND - if the containing Step does not exist
Args:
request: (ToolresultsProjectsHistoriesExecutionsStepsTestCasesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListTestCasesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'toolresults.projects.histories.executions.steps.testCases.list',
ordered_params=[u'projectId', u'historyId', u'executionId', u'stepId'],
path_params=[u'executionId', u'historyId', u'projectId', u'stepId'],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'projects/{projectId}/histories/{historyId}/executions/{executionId}/steps/{stepId}/testCases',
request_field='',
request_type_name=u'ToolresultsProjectsHistoriesExecutionsStepsTestCasesListRequest',
response_type_name=u'ListTestCasesResponse',
supports_download=False,
)
class ProjectsHistoriesExecutionsStepsThumbnailsService(base_api.BaseApiService):
"""Service class for the projects_histories_executions_steps_thumbnails resource."""
_NAME = u'projects_histories_executions_steps_thumbnails'
def __init__(self, client):
super(ToolresultsV1beta3.ProjectsHistoriesExecutionsStepsThumbnailsService, self).__init__(client)
self._upload_configs = {
}
def List(self, request, global_params=None):
r"""Lists thumbnails of images attached to a step.
May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is
collector
pos_Etag=[position[0]+22.5,position[1]-12.5]
pos_Ctag=[position[0]+22.5,position[1]+12.5]
else:
if isNPN:
inkDraw.line.relCoords(elem, [[7,-5],[0,-17]],[position[0]+18,position[1]-3]) # collector
inkDraw.line.relCoords(elem, [[7,5]],[position[0]+18,position[1]+3],lineStyle=lineStyleArrow) # emitter arrow
inkDraw.line.relCoords(elem, [[0,17]],[position[0]+25,position[1]+8]) # emitter
else:
inkDraw.line.relCoords(elem, [[7,-5],[0,-17]],[position[0]+18,position[1]-3]) # collector
inkDraw.line.relCoords(elem, [[-7,-5]],[position[0]+25,position[1]+8],lineStyle=lineStyleArrow) # emitter arrow
inkDraw.line.relCoords(elem, [[0,17]],[position[0]+25,position[1]+8]) # emitter
pos_Ctag=[position[0]+22.5,position[1]-12.5]
pos_Etag=[position[0]+22.5,position[1]+12.5]
if drawEnvelope:
inkDraw.circle.centerRadius(elem, centerPoint=[position[0]+22,position[1]], radius=10, offset=[0, 0], label='circle')
if drawBCEtags:
tB=inkDraw.text.latex(self,group,'B',position=[position[0]+10,position[1]-3],fontSize=self.fontSizeSmall/1.5,refPoint='cc',preambleFile=self.preambleFile,angleDeg=-angleDeg)
tC=inkDraw.text.latex(self,group,'C',position=pos_Ctag,fontSize=self.fontSizeSmall/1.5,refPoint='cc',preambleFile=self.preambleFile,angleDeg=-angleDeg)
tE=inkDraw.text.latex(self,group,'E',position=pos_Etag,fontSize=self.fontSizeSmall/1.5,refPoint='cc',preambleFile=self.preambleFile,angleDeg=-angleDeg)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
#draw voltage drops
if drawVCE:
pos=[position[0]+25+10 ,position[1]]
self.drawVoltArrowSimple(group,pos,name=VCEname,color=self.voltageColor,angleDeg=90,invertArrows=mirrorEC,size=20.0,invertCurvatureDirection=False,extraAngleText=angleDeg)
if drawVCB:
if mirrorEC:
pos = [position[0]+12,position[1]+12]
ang = -45
else:
pos = [position[0]+12,position[1]-12]
ang = 45
self.drawVoltArrowSimple(group,pos,name=VCBname,color=self.voltageColor,angleDeg=ang,invertArrows=False,size=20.0,invertCurvatureDirection=not mirrorEC,extraAngleText=angleDeg)
if drawVBE:
if mirrorEC:
pos = [position[0]+12,position[1]-12]
ang = 45
else:
pos = [position[0]+12,position[1]+12]
ang = -45
self.drawVoltArrowSimple(group,pos,name=VBEname,color=self.voltageColor,angleDeg= ang,invertArrows=True,size=20.0,invertCurvatureDirection= mirrorEC,extraAngleText=angleDeg)
# draw terminal currents
if drawICarrow:
if mirrorEC:
self.drawCurrArrowSimple(group,[position[0]+30 ,position[1]+17.5],name=ICname,color=self.currentColor,
angleDeg=90,invertArrows=not isNPN,size=7.5,invertTextSide=True,extraAngleText=angleDeg)
else:
self.drawCurrArrowSimple(group,[position[0]+30 ,position[1]-17.5],name=ICname,color=self.currentColor,
angleDeg=90,invertArrows=isNPN,size=7.5,invertTextSide=True,extraAngleText=angleDeg)
if drawIBarrow:
self.drawCurrArrowSimple(group,[position[0]+7.5-10 ,position[1]-5],name=IBname,color=self.currentColor,
angleDeg=0,invertArrows=not isNPN,size=7.5,invertTextSide=False,extraAngleText=angleDeg)
if drawIEarrow:
if mirrorEC:
self.drawCurrArrowSimple(group,[position[0]+30 ,position[1]-17.5],name=IEname,color=self.currentColor,
angleDeg=90,invertArrows=not isNPN,size=7.5,invertTextSide=True,extraAngleText=angleDeg)
else:
self.drawCurrArrowSimple(group,[position[0]+30 ,position[1]+17.5],name=IEname,color=self.currentColor,
angleDeg=90,invertArrows=isNPN,size=7.5,invertTextSide=True,extraAngleText=angleDeg)
return group;
#---------------------------------------------
#metal-oxide-semiconductor field-effect transistor (N and P channel)
def drawTransistorMOSFET(self,parent,position=[0, 0],angleDeg=0,label='MOSFET',mirrorSD=False,
drawSGDtags=False,
drawEnvelope=False,
modeType='MOSFET-E',
gateType='P_gate',
bodyDiode=False,
drawVGS=False,
drawVDS=False,
drawVDG=False,
drawIDarrow=False,
drawISarrow=False,
drawIGarrow=False,
VGSname='V_{GS}',
VDSname='V_{SD}',
VDGname='V_{GD}',
IDname='i_d',
ISname='i_s',
IGname='i_g'):
""" draws a general Field Effect transistor
parent: parent object
label: label of the object (it can be repeated)
position: position [x,y]
angleDeg: orientation (default: 0.0)
mirrorSD: invert S and D terminals (default: False; D above, S below)
drawSGDtags: identify S, G and D terminals with tags (default: False)
drawEnvelope: draw circular envelope (default:False)
modeType: transistor mode: 'MOSFET-E' (enhancement, default); any other value is treated as depletion mode
gateType: type of gate: 'P_gate', 'N_gate'
bodyDiode: draws body diode (MOSFET-E only)
drawVGS,drawVDS,drawVDG: draw voltage drop annotations (default: False)
drawIDarrow,drawISarrow,drawIGarrow: draw current annotations (default: False)
VGSname,VDSname,VDGname: voltage drop annotation text
IDname,ISname,IGname: current annotation text
"""
if gateType == 'P_gate':
isNgate=False
else:
isNgate=True
if modeType == 'MOSFET-E':
isEmode=True
else:
isEmode=False
group = self.createGroup(parent,label)
elem = self.createGroup(group,label)
colorBlack=inkDraw.color.defined('black')
L_arrow=2.0
markerMOS=inkDraw.marker.createMarker(self, 'MOSArrow', 'M -0.3,0 l -%f,%f l 0,-%f z'% (L_arrow*1.2, L_arrow/2.0,L_arrow), RenameMode=1,
strokeColor=colorBlack, fillColor=colorBlack, lineWidth=0.6)
lineStyleArrow = inkDraw.lineStyle.set(lineWidth=0.7, lineColor=colorBlack, markerEnd=markerMOS)
lineStyleFine = inkDraw.lineStyle.set(lineWidth=0.7, lineColor=colorBlack)
if mirrorSD:
inkDraw.line.relCoords(elem, [[0,-12],[-28,0]],[position[0]+17,position[1]+6]) #gate
if bodyDiode and isEmode:
inkDraw.line.relCoords(elem, [[0,18.75]],[position[0]+24,position[1]+5.25]) # drain line
else:
inkDraw.line.relCoords(elem, [[0,18.6]],[position[0]+24,position[1]+5.4]) # drain line
inkDraw.line.relCoords(elem, [[5,0],[0,5.25]],[position[0]+19,position[1]+5.25],lineStyle=lineStyleFine) # drain line
inkDraw.line.relCoords(elem, [[0,-20.75]],[position[0]+24,position[1]-5.25]) # source line
inkDraw.line.relCoords(elem, [[5,0],[0,5.25]],[position[0]+19,position[1]-5.25],lineStyle=lineStyleFine) # source line
inkDraw.circle.centerRadius(elem, [position[0]+24,position[1]-5.25], radius=0.4, offset=[0, 0], label='circle') # source dot
if isNgate:
inkDraw.line.relCoords(elem, [[-5,0]],[position[0]+24,position[1]+0],lineStyle=lineStyleArrow) # horizontal arrow line
else:
inkDraw.line.relCoords(elem, [[5,0]],[position[0]+19,position[1]+0],lineStyle=lineStyleArrow) # horizontal arrow line
if bodyDiode and isEmode:
inkDraw.circle.centerRadius(elem, [position[0]+24,position[1]+5.25], radius=0.4, offset=[0, 0], label='circle') # diode cathode dot
inkDraw.line.relCoords(elem, [[4,0],[0,-3.75]],[position[0]+24,position[1]+5.25],lineStyle=lineStyleFine) # diode cathode
inkDraw.line.relCoords(elem, [[4,0],[0, 3.75]],[position[0]+24,position[1]-5.25],lineStyle=lineStyleFine) # diode anode
if isNgate:
inkDraw.line.relCoords(elem, [[3,0]],[position[0]+26.5,position[1]+1.5],lineStyle=lineStyleFine) # diode cathode side line
inkDraw.line.relCoords(elem, [[3,0],[-1.5, 3],[-1.5,-3]],[position[0]+26.5,position[1]-1.5],lineStyle=lineStyleFine) # diode
else:
inkDraw.line.relCoords(elem, [[3,0]],[position[0]+26.5,position[1]-1.5],lineStyle=lineStyleFine) # diode cathode side line
inkDraw.line.relCoords(elem, [[3,0],[-1.5,-3],[-1.5, 3]],[position[0]+26.5,position[1]+1.5],lineStyle=lineStyleFine) # diode
pos_Gtag=[position[0]+9,position[1]-3]
pos_Dtag=[position[0]+26.5,position[1]+12.5]
pos_Stag=[position[0]+26.5,position[1]-12.5]
else:
pos_Gtag=[position[0]+7,position[1]-3]
pos_Dtag=[position[0]+26.5,position[1]+11.5]
pos_Stag=[position[0]+26.5,position[1]-11.5]
else:
inkDraw.line.relCoords(elem, [[0,12],[-28,0]],[position[0]+17,position[1]-6]) #gate
if bodyDiode and isEmode:
inkDraw.line.relCoords(elem, [[0,-18.75]],[position[0]+24,position[1]-5.25]) # drain line
else:
inkDraw.line.relCoords(elem, [[0,-18.6]],[position[0]+24,position[1]-5.4]) # drain line
inkDraw.line.relCoords(elem, [[5,0],[0,-5.25]],[position[0]+19,position[1]-5.25],lineStyle=lineStyleFine) # drain line
inkDraw.line.relCoords(elem, [[0,20.75]],[position[0]+24,position[1]+5.25]) # source line
inkDraw.line.relCoords(elem, [[5,0],[0,-5.25]],[position[0]+19,position[1]+5.25],lineStyle=lineStyleFine) # source line
inkDraw.circle.centerRadius(elem, [position[0]+24,position[1]+5.25], radius=0.4, offset=[0, 0], label='circle') # source dot
if isNgate:
inkDraw.line.relCoords(elem, [[-5,0]],[position[0]+24,position[1]+0],lineStyle=lineStyleArrow) # horizontal arrow line
else:
inkDraw.line.relCoords(elem, [[5,0]],[position[0]+19,position[1]+0],lineStyle=lineStyleArrow) # horizontal arrow line
if bodyDiode and isEmode:
inkDraw.circle.centerRadius(elem, [position[0]+24,position[1]-5.25], radius=0.4, offset=[0, 0], label='circle') # diode cathode dot
inkDraw.line.relCoords(elem, [[4,0],[0, 3.75]],[position[0]+24,position[1]-5.25],lineStyle=lineStyleFine) # diode cathode
inkDraw.line.relCoords(elem, [[4,0],[0,-3.75]],[position[0]+24,position[1]+5.25],lineStyle=lineStyleFine) # diode anode
if isNgate:
inkDraw.line.relCoords(elem, [[3,0]],[position[0]+26.5,position[1]-1.5],lineStyle=lineStyleFine) # diode cathode side line
inkDraw.line.relCoords(elem, [[3,0],[-1.5,-3],[-1.5,3]],[position[0]+26.5,position[1]+1.5],lineStyle=lineStyleFine) # diode
else:
inkDraw.line.relCoords(elem, [[3,0]],[position[0]+26.5,position[1]+1.5],lineStyle=lineStyleFine) # diode cathode side line
inkDraw.line.relCoords(elem, [[3,0],[-1.5,3],[-1.5,-3]],[position[0]+26.5,position[1]-1.5],lineStyle=lineStyleFine) # diode
pos_Gtag=[position[0]+9,position[1]+3]
pos_Dtag=[position[0]+26.5,position[1]-12.5]
pos_Stag=[position[0]+26.5,position[1]+12.5]
else:
pos_Gtag=[position[0]+7,position[1]+3]
pos_Dtag=[position[0]+26.5,position[1]-11.5]
pos_Stag=[position[0]+26.5,position[1]+11.5]
if isEmode:
# enhancement mode: channel drawn as three dashed segments
inkDraw.line.relCoords(elem, [[0,3.5]],[position[0]+19,position[1]-7],lineStyle=lineStyleFine) #channel segment
inkDraw.line.relCoords(elem, [[0,3.5]],[position[0]+19,position[1]-1.75],lineStyle=lineStyleFine) #channel segment
inkDraw.line.relCoords(elem, [[0,3.5]],[position[0]+19,position[1]+3.5],lineStyle=lineStyleFine) #channel segment
else:
inkDraw.line.relCoords(elem, [[0,14]],[position[0]+19,position[1]-7],lineStyle=lineStyleFine) #depletion mode: continuous channel line
if drawEnvelope:
if bodyDiode and isEmode:
inkDraw.circle.centerRadius(elem, centerPoint=[position[0]+22,position[1]], radius=10, offset=[0, 0], label='circle')
else:
inkDraw.circle.centerRadius(elem, centerPoint=[position[0]+20,position[1]], radius=10, offset=[0, 0], label='circle')
if drawSGDtags:
tB=inkDraw.text.latex(self,group,'G',position=pos_Gtag,fontSize=self.fontSizeSmall/1.5,refPoint='cc',preambleFile=self.preambleFile,angleDeg=-angleDeg)
tC=inkDraw.text.latex(self,group,'D',position=pos_Dtag,fontSize=self.fontSizeSmall/1.5,refPoint='cc',preambleFile=self.preambleFile,angleDeg=-angleDeg)
tE=inkDraw.text.latex(self,group,'S',position=pos_Stag,fontSize=self.fontSizeSmall/1.5,refPoint='cc',preambleFile=self.preambleFile,angleDeg=-angleDeg)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
#draw voltage drops
if drawVDS:
pos=[position[0]+25+10 ,position[1]]
self.drawVoltArrowSimple(group,pos,name=VDSname,color=self.voltageColor,angleDeg=90,
invertArrows=mirrorSD ,size=20.0,invertCurvatureDirection=False,extraAngleText=angleDeg)
if drawVGS:
if mirrorSD:
pos = [position[0]+15,position[1]-14]
ang = +19
else:
pos = [position[0]+15,position[1]+14]
ang = -19
self.drawVoltArrowSimple(group,pos,name=VGSname,color=self.voltageColor,angleDeg=ang,
invertArrows=True, size=10.0, invertCurvatureDirection=mirrorSD, extraAngleText=angleDeg)
if drawVDG:
if mirrorSD:
pos = [position[0]+10,position[1]+8]
ang = -45
else:
pos = [position[0]+10,position[1]-8]
ang = 45
self.drawVoltArrowSimple(group,pos,name=VDGname,color=self.voltageColor,angleDeg= ang,
invertArrows= False,size=20.0,invertCurvatureDirection=not mirrorSD,extraAngleText=angleDeg)
# draw terminal currents
if drawISarrow:
if mirrorSD:
pos = [position[0]+29 ,position[1]-17.5]
else:
pos = [position[0]+29 ,position[1]+17.5]
self.drawCurrArrowSimple(group,pos,name=ISname,color=self.currentColor,
angleDeg=90,invertArrows=not mirrorSD,size=7.5,invertTextSide=True,extraAngleText=angleDeg)
if drawIGarrow:
if mirrorSD:
pos = [position[0]-5 ,position[1]-11]
else:
pos = [position[0]-5 ,position[1]+11]
self.drawCurrArrowSimple(group,pos,name=IGname,color=self.currentColor,
angleDeg=0,invertArrows=False,size=7.5,invertTextSide=not mirrorSD,extraAngleText=angleDeg)
if drawIDarrow:
if mirrorSD:
pos = [position[0]+29 ,position[1]+17.5]
else:
pos = [position[0]+29 ,position[1]-17.5]
self.drawCurrArrowSimple(group,pos,name=IDname,color=self.currentColor,
angleDeg=90,invertArrows=not mirrorSD,size=7.5,invertTextSide=True,extraAngleText=angleDeg)
return group;
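# --- Illustrative usage sketch (editor's addition, not part of the original library) ---
# A minimal example of how this method might be called, e.g. from an Inkscape
# extension's effect() routine. 'root_layer' is a hypothetical existing parent
# group/layer; all keyword arguments correspond to parameters documented above.
#
#   self.drawTransistorMOSFET(root_layer, position=[10, 10], angleDeg=0,
#                             label='M1', mirrorSD=False,
#                             drawSGDtags=True, drawEnvelope=True,
#                             modeType='MOSFET-E', gateType='N_gate',
#                             bodyDiode=True,
#                             drawVGS=True, drawIDarrow=True)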
#---------------------------------------------
#junction gate field-effect transistor (N and P channel)
def drawTransistorJFET(self,parent,position=[0, 0],angleDeg=0,label='JFET',mirrorSD=False,
drawSGDtags=False,
drawEnvelope=False,
gateType='P_gate',
moveGate=False,
drawVGS=False,
drawVDS=False,
drawVDG=False,
drawIDarrow=False,
drawISarrow=False,
drawIGarrow=False,
VGSname='V_{GS}',
VDSname='V_{SD}',
VDGname='V_{GD}',
IDname='i_d',
ISname='i_s',
IGname='i_g'):
""" draws a junction gate field-effect transistor JFET
parent: parent object
label: label of the object (it can be repeated)
position: position [x,y]
angleDeg: orientation (default: 0.0)
mirrorSD: invert S and D terminals (default: False (D above, S below)
drawSGDtags: indentify SGD terminals (default: False)
drawEnvelope: draw circular envelope (default:False)
transistorType: type of field effect transistor: 'MOSFET-E' (Default) 'MOSFET_P'
gateType: type of gate: 'P_gate', 'N_gate'
moveGate: move gate terminar towards the source
drawVGS,drawVDS,drawVDG: draw voltage drop annotations (default: False)
drawIDarrow,drawISarrow,drawIGarrow: draw current annotations (default: False)
VGSname,VDSname,VDGname: voltage drop annotation text
IDname,ISname,IGname: current annotation text
"""
if gateType == 'P_gate':
isNgate=False
else:
isNgate=True
group = self.createGroup(parent,label)
elem = self.createGroup(group,label)
colorBlack=inkDraw.color.defined('black')
R_circle=10.0
L_arrow=2.0
markerMOS=inkDraw.marker.createMarker(self, 'MOSArrow', 'M -0.3,0 l -%f,%f l 0,-%f z'% (L_arrow*1.2, L_arrow/2.0,L_arrow), RenameMode=1,
strokeColor=colorBlack, fillColor=colorBlack, lineWidth=0.6)
lineStyleArrow = inkDraw.lineStyle.set(lineWidth=1.0, lineColor=colorBlack, markerEnd=markerMOS)
inkDraw.line.relCoords(elem, [[6,0],[0,20]],[position[0]+17,position[1]+5.0]) # source line
inkDraw.line.relCoords(elem, [[6,0],[0,-20]],[position[0]+17,position[1]-5.0]) # drain line
inkDraw.line.relCoords(elem, [[0,14]],[position[0]+17,position[1]-7],lineStyle=inkDraw.lineStyle.setSimpleBlack(lineWidth=2)) # vertical junction line
if moveGate:
if mirrorSD:
posG_Y=-5
else:
posG_Y=5
else:
posG_Y=0
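# Editor's note (descriptive comment, not in the original source): the gate lead must
# stop at the circular envelope. theta is the angle at which a horizontal line at height
# posG_Y crosses a circle of radius R_circle; P1 lies (approximately) on that circle and
# P2 on the vertical junction bar, so the gate arrow spans from the envelope to the bar.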
theta=math.asin(posG_Y/R_circle)
P1=[10+R_circle*(1-math.cos(theta)),posG_Y]
P2=[10+R_circle-3,posG_Y]
inkDraw.line.absCoords(elem, [[-12,posG_Y],P1],position) #gate terminal
if isNgate:
inkDraw.line.absCoords(elem, [P1,P2],position,lineStyle=lineStyleArrow) #gate arrow -->
else:
inkDraw.line.absCoords(elem, [P2,P1],position,lineStyle=lineStyleArrow) #gate arrow <--
if drawEnvelope:
inkDraw.circle.centerRadius(elem, centerPoint=[position[0]+19,position[1]], radius=10, offset=[0, 0], label='circle')
if drawSGDtags:
if mirrorSD:
pos_Gtag=[position[0]+6,position[1]+3+posG_Y]
pos_Dtag=[position[0]+25.5,position[1]+11.5]
pos_Stag=[position[0]+25.5,position[1]-11.5]
else:
pos_Gtag=[position[0]+6,position[1]-3+posG_Y]
pos_Dtag=[position[0]+25.5,position[1]-11.5]
pos_Stag=[position[0]+25.5,position[1]+11.5]
tB=inkDraw.text.latex(self,group,'G',position=pos_Gtag,fontSize=self.fontSizeSmall/1.5,refPoint='cc',preambleFile=self.preambleFile,angleDeg=-angleDeg)
tC=inkDraw.text.latex(self,group,'D',position=pos_Dtag,fontSize=self.fontSizeSmall/1.5,refPoint='cc',preambleFile=self.preambleFile,angleDeg=-angleDeg)
tE=inkDraw.text.latex(self,group,'S',position=pos_Stag,fontSize=self.fontSizeSmall/1.5,refPoint='cc',preambleFile=self.preambleFile,angleDeg=-angleDeg)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
#draw voltage drops
if drawVDS:
pos=[position[0]+25+9 ,position[1]]
self.drawVoltArrowSimple(group,pos,name=VDSname,color=self.voltageColor,angleDeg=90,
invertArrows=mirrorSD ,size=20.0,invertCurvatureDirection=False,extraAngleText=angleDeg)
if drawVGS:
if mirrorSD:
if moveGate:
pos = [position[0]+15,position[1]-13]
ang = 19
L = 10
else:
pos = [position[0]+12,position[1]-11]
ang = +30
L = 15
else:
if moveGate:
pos = [position[0]+15,position[1]+13]
ang = -19
L = 10
else:
pos = [position[0]+12,position[1]+11]
ang = -30
L = 15
self.drawVoltArrowSimple(group,pos,name=VGSname,color=self.voltageColor,angleDeg=ang,
invertArrows=True, size=L, invertCurvatureDirection=mirrorSD, extraAngleText=angleDeg)
if drawVDG:
if mirrorSD:
if moveGate:
pos = [position[0]+12,position[1]+9]
ang = -45
L = 20
else:
pos = [position[0]+12,position[1]+11]
ang = -30
L = 15
else:
if moveGate:
pos = [position[0]+12,position[1]-9]
ang = 45
L = 20
else:
pos = [position[0]+12,position[1]-11]
ang = 30
L = 15
self.drawVoltArrowSimple(group,pos,name=VDGname,color=self.voltageColor,angleDeg= ang,
invertArrows= False,size=L,invertCurvatureDirection=not mirrorSD,extraAngleText=angleDeg)
# draw terminal currents
if drawISarrow:
if mirrorSD:
pos = [position[0]+28 ,position[1]-17.5]
else:
pos = [position[0]+28 ,position[1]+17.5]
self.drawCurrArrowSimple(group,pos,name=ISname,color=self.currentColor,
angleDeg=90,invertArrows=not mirrorSD,size=7.5,invertTextSide=True,extraAngleText=angleDeg)
if drawIGarrow:
if mirrorSD:
pos = [position[0]-5 ,position[1]+posG_Y-5]
else:
pos = [position[0]-5 ,position[1]+posG_Y+5]
self.drawCurrArrowSimple(group,pos,name=IGname,color=self.currentColor,
angleDeg=0,invertArrows=False,size=7.5,invertTextSide=not mirrorSD,extraAngleText=angleDeg)
if drawIDarrow:
if mirrorSD:
pos = [position[0]+28 ,position[1]+17.5]
else:
pos = [position[0]+28 ,position[1]-17.5]
self.drawCurrArrowSimple(group,pos,name=IDname,color=self.currentColor,
angleDeg=90,invertArrows=not mirrorSD,size=7.5,invertTextSide=True,extraAngleText=angleDeg)
return group;
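# --- Illustrative usage sketch (editor's addition, not part of the original library) ---
# Hypothetical call drawing an N-channel JFET with the gate moved towards the source
# and the terminal tags shown; 'root_layer' is assumed to be an existing parent layer.
#
#   self.drawTransistorJFET(root_layer, position=[10, 60], angleDeg=0,
#                           label='J1', mirrorSD=False,
#                           drawSGDtags=True, drawEnvelope=True,
#                           gateType='N_gate', moveGate=True,
#                           drawVGS=True, drawISarrow=True)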
#---------------------------------------------
def drawSwitch2T(self,parent,position=[0, 0],value='S',label='Switch',angleDeg=0,flagOpen=True,flagDrawArrow=False,OpenCloseText=''):
""" draws a switch with two terminals only
parent: parent object
position: position [x,y]
label: label of the object (it can be repeated)
"""
group = self.createGroup(parent,label)
elem = self.createGroup(group,label)
color=inkDraw.color.defined('red')
colorBlack=inkDraw.color.defined('black')
lineStyleSign=inkDraw.lineStyle.set(lineWidth=0.7, lineColor=colorBlack, fillColor=colorBlack)
if flagOpen:
inkDraw.line.relCoords(elem, [[15.5,0],[20,-8]],position)
inkDraw.line.relCoords(elem, [[-15.5,0]],[position[0]+50,position[1]])
else:
inkDraw.line.relCoords(elem, [[15.5,0],[20,0]],position)
inkDraw.line.relCoords(elem, [[-15.5,0]],[position[0]+50,position[1]])
inkDraw.circle.centerRadius(elem, [position[0]+35,position[1]], 1.0, offset=[0,0],lineStyle=lineStyleSign)
inkDraw.circle.centerRadius(elem, [position[0]+15,position[1]], 1.0, offset=[0,0],lineStyle=lineStyleSign)
[arrowStart,arrowEnd] = inkDraw.marker.createArrow1Marker(self,'arrowSwitch',RenameMode=0,scale=0.25,strokeColor=color,fillColor=color)
if flagDrawArrow:
if OpenCloseText:
pos_text=[position[0]+25,position[1]+4+self.textOffset]
if inkDraw.useLatex:
OpenCloseText='$'+OpenCloseText +'$'
inkDraw.text.latex(self,group,OpenCloseText,pos_text,fontSize=self.fontSize,refPoint='tc',preambleFile=self.preambleFile)
if flagOpen:
lineStyle = inkDraw.lineStyle.set(lineColor=color,markerEnd=arrowEnd);
else:
lineStyle = inkDraw.lineStyle.set(lineColor=color,markerStart=arrowStart);
inkDraw.arc.startEndRadius(group, [0,-5], [0,5], 12, [position[0]+24,position[1]],lineStyle=lineStyle,flagRightOf=False)
pos_text=[position[0]+25,position[1]-6-self.textOffset]
if value:
if inkDraw.useLatex:
value='$'+value +'$'
inkDraw.text.latex(self,group,value,pos_text,fontSize=self.fontSize,refPoint='bc',preambleFile=self.preambleFile)
if angleDeg!=0:
self.rotateElement(group,position,angleDeg)
return group;
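# --- Illustrative usage sketch (editor's addition, not part of the original library) ---
# Hypothetical call drawing an open switch with the actuation arrow and a switching
# instant annotation; 'root_layer' is assumed to be an existing parent layer.
#
#   self.drawSwitch2T(root_layer, position=[10, 110], value='S_1',
#                     label='switch', angleDeg=0, flagOpen=True,
#                     flagDrawArrow=True, OpenCloseText='t=0')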
#---------------------------------------------
def drawVoltArrowSimple(self,parent,position,label='arrowV',name='v',color=inkDraw.color.defined('black'),
angleDeg=0,invertArrows=False,size=20.0,invertCurvatureDirection=False,extraAngleText=0.0):
""" draws a voltage drop arrow
parent: parent object