ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M)
---|---|---
py | b409c8b27a5895bf272262b26cecfc29a3f23969 | from hvm import constants
from hvm.exceptions import (
    InvalidJumpDestination,
    InvalidInstruction,
    Halt,
)
from hvm.vm.opcode_values import (
    JUMPDEST,
)


def stop(computation):
    # STOP: unconditionally halt execution of the current computation.
    raise Halt('STOP')


def jump(computation):
    # JUMP: pop the destination off the stack and move the program counter there.
    jump_dest = computation.stack_pop(type_hint=constants.UINT256)
    computation.code.pc = jump_dest

    next_opcode = computation.code.peek()

    # The destination must be a JUMPDEST opcode that is not embedded in push data.
    if next_opcode != JUMPDEST:
        raise InvalidJumpDestination("Invalid Jump Destination")

    if not computation.code.is_valid_opcode(jump_dest):
        raise InvalidInstruction("Jump resulted in invalid instruction")


def jumpi(computation):
    # JUMPI: conditional jump; only taken when check_value is non-zero.
    jump_dest, check_value = computation.stack_pop(num_items=2, type_hint=constants.UINT256)

    if check_value:
        computation.code.pc = jump_dest

        next_opcode = computation.code.peek()

        if next_opcode != JUMPDEST:
            raise InvalidJumpDestination("Invalid Jump Destination")

        if not computation.code.is_valid_opcode(jump_dest):
            raise InvalidInstruction("Jump resulted in invalid instruction")


def jumpdest(computation):
    # JUMPDEST: marks a valid jump target; a no-op when executed.
    pass


def pc(computation):
    # PC: push the position of the current instruction (the code stream has already advanced past it).
    pc = max(computation.code.pc - 1, 0)
    computation.stack_push(pc)


def gas(computation):
    # GAS: push the amount of gas remaining for this computation.
    gas_remaining = computation.get_gas_remaining()
    computation.stack_push(gas_remaining)
|
py | b409c8f311e0cc22d37a0b87b202ddae92507879 | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
# pylint: disable=bad-continuation
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from azext_kusto.generated._client_factory import (
cf_cluster,
cf_cluster_principal_assignment,
cf_database,
cf_attached_database_configuration,
cf_managed_private_endpoint,
cf_database_principal_assignment,
cf_script,
cf_private_endpoint_connection,
cf_private_link_resource,
cf_data_connection,
cf_operation_result,
cf_operation_result_location,
)
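# Each CliCommandType below maps to one operations class in the vendored Kusto SDK; the '{}' in operations_tmpl is filled in with the SDK method name when commands are registered.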
kusto_attached_database_configuration = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._attached_database_configurations_operations#AttachedDatabaseConfigurationsOperations.{}',
client_factory=cf_attached_database_configuration,
)
kusto_cluster = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._clusters_operations#ClustersOperations.{}',
client_factory=cf_cluster,
)
kusto_cluster_principal_assignment = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._cluster_principal_assignments_operations#ClusterPrincipalAssignmentsOperations.{}',
client_factory=cf_cluster_principal_assignment,
)
kusto_data_connection = CliCommandType(
operations_tmpl=(
'azext_kusto.vendored_sdks.kusto.operations._data_connections_operations#DataConnectionsOperations.{}'
),
client_factory=cf_data_connection,
)
kusto_database = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._databases_operations#DatabasesOperations.{}',
client_factory=cf_database,
)
kusto_database_principal_assignment = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._database_principal_assignments_operations#DatabasePrincipalAssignmentsOperations.{}',
client_factory=cf_database_principal_assignment,
)
kusto_managed_private_endpoint = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._managed_private_endpoints_operations#ManagedPrivateEndpointsOperations.{}',
client_factory=cf_managed_private_endpoint,
)
kusto_operation_result = CliCommandType(
operations_tmpl=(
'azext_kusto.vendored_sdks.kusto.operations._operations_results_operations#OperationsResultsOperations.{}'
),
client_factory=cf_operation_result,
)
kusto_operation_result_location = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._operations_results_location_operations#OperationsResultsLocationOperations.{}',
client_factory=cf_operation_result_location,
)
kusto_private_endpoint_connection = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._private_endpoint_connections_operations#PrivateEndpointConnectionsOperations.{}',
client_factory=cf_private_endpoint_connection,
)
kusto_private_link_resource = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._private_link_resources_operations#PrivateLinkResourcesOperations.{}',
client_factory=cf_private_link_resource,
)
kusto_script = CliCommandType(
operations_tmpl='azext_kusto.vendored_sdks.kusto.operations._scripts_operations#ScriptsOperations.{}',
client_factory=cf_script,
)
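# Register every 'kusto ...' command group and wire its commands to the corresponding custom implementations.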
def load_command_table(self, _):
with self.command_group(
'kusto attached-database-configuration',
kusto_attached_database_configuration,
client_factory=cf_attached_database_configuration,
) as g:
g.custom_command('list', 'kusto_attached_database_configuration_list')
g.custom_show_command(
'show', 'kusto_attached_database_configuration_show')
g.custom_command(
'create', 'kusto_attached_database_configuration_create', supports_no_wait=True)
g.generic_update_command(
'update',
supports_no_wait=True,
custom_func_name='kusto_attached_database_configuration_update',
setter_name='begin_create_or_update',
)
g.custom_command(
'delete', 'kusto_attached_database_configuration_delete', supports_no_wait=True, confirmation=True
)
g.custom_wait_command(
'wait', 'kusto_attached_database_configuration_show')
with self.command_group('kusto cluster', kusto_cluster, client_factory=cf_cluster) as g:
g.custom_command('list', 'kusto_cluster_list')
g.custom_show_command('show', 'kusto_cluster_show')
g.custom_command('create', 'kusto_cluster_create',
supports_no_wait=True)
g.custom_command('update', 'kusto_cluster_update',
supports_no_wait=True)
g.custom_command('delete', 'kusto_cluster_delete',
supports_no_wait=True, confirmation=True)
g.custom_command('add-language-extension',
'kusto_cluster_add_language_extension', supports_no_wait=True)
g.custom_command('detach-follower-database',
'kusto_cluster_detach_follower_database', supports_no_wait=True)
g.custom_command('diagnose-virtual-network',
'kusto_cluster_diagnose_virtual_network', supports_no_wait=True)
g.custom_command('list-follower-database',
'kusto_cluster_list_follower_database')
g.custom_command('list-language-extension',
'kusto_cluster_list_language_extension')
g.custom_command(
'list-outbound-network-dependency-endpoint', 'kusto_cluster_list_outbound_network_dependency_endpoint'
)
g.custom_command('list-sku', 'kusto_cluster_list_sku')
g.custom_command('remove-language-extension',
'kusto_cluster_remove_language_extension', supports_no_wait=True)
g.custom_command('start', 'kusto_cluster_start', supports_no_wait=True)
g.custom_command('stop', 'kusto_cluster_stop', supports_no_wait=True)
g.custom_wait_command('wait', 'kusto_cluster_show')
with self.command_group(
'kusto cluster-principal-assignment',
kusto_cluster_principal_assignment,
client_factory=cf_cluster_principal_assignment,
) as g:
g.custom_command('list', 'kusto_cluster_principal_assignment_list')
g.custom_show_command(
'show', 'kusto_cluster_principal_assignment_show')
g.custom_command(
'create', 'kusto_cluster_principal_assignment_create', supports_no_wait=True)
g.generic_update_command(
'update',
supports_no_wait=True,
custom_func_name='kusto_cluster_principal_assignment_update',
setter_name='begin_create_or_update',
)
g.custom_command(
'delete', 'kusto_cluster_principal_assignment_delete', supports_no_wait=True, confirmation=True
)
g.custom_wait_command(
'wait', 'kusto_cluster_principal_assignment_show')
with self.command_group('kusto data-connection', kusto_data_connection, client_factory=cf_data_connection) as g:
g.custom_command('list', 'kusto_data_connection_list')
g.custom_show_command('show', 'kusto_data_connection_show')
g.custom_command(
'event-grid create', 'kusto_data_connection_event_grid_create', supports_no_wait=True)
g.custom_command(
'event-hub create', 'kusto_data_connection_event_hub_create', supports_no_wait=True)
g.custom_command(
'iot-hub create', 'kusto_data_connection_iot_hub_create', supports_no_wait=True)
g.custom_command(
'event-grid update', 'kusto_data_connection_event_grid_update', supports_no_wait=True)
g.custom_command(
'event-hub update', 'kusto_data_connection_event_hub_update', supports_no_wait=True)
g.custom_command(
'iot-hub update', 'kusto_data_connection_iot_hub_update', supports_no_wait=True)
g.custom_command('delete', 'kusto_data_connection_delete',
supports_no_wait=True, confirmation=True)
g.custom_command(
'event-grid data-connection-validation',
'kusto_data_connection_event_grid_data_connection_validation',
supports_no_wait=True,
)
g.custom_command(
'event-hub data-connection-validation',
'kusto_data_connection_event_hub_data_connection_validation',
supports_no_wait=True,
)
g.custom_command(
'iot-hub data-connection-validation',
'kusto_data_connection_iot_hub_data_connection_validation',
supports_no_wait=True,
)
g.custom_wait_command('wait', 'kusto_data_connection_show')
with self.command_group('kusto database', kusto_database, client_factory=cf_database) as g:
g.custom_command('list', 'kusto_database_list')
g.custom_show_command('show', 'kusto_database_show')
g.custom_command('create', 'kusto_database_create',
supports_no_wait=True)
g.custom_command('update', 'kusto_database_update',
supports_no_wait=True)
g.custom_command('delete', 'kusto_database_delete',
supports_no_wait=True, confirmation=True)
g.custom_command('add-principal', 'kusto_database_add_principal')
g.custom_command('list-principal', 'kusto_database_list_principal')
g.custom_command('remove-principal', 'kusto_database_remove_principal')
g.custom_wait_command('wait', 'kusto_database_show')
with self.command_group(
'kusto database-principal-assignment',
kusto_database_principal_assignment,
client_factory=cf_database_principal_assignment,
) as g:
g.custom_command('list', 'kusto_database_principal_assignment_list')
g.custom_show_command(
'show', 'kusto_database_principal_assignment_show')
g.custom_command(
'create', 'kusto_database_principal_assignment_create', supports_no_wait=True)
g.generic_update_command(
'update',
supports_no_wait=True,
custom_func_name='kusto_database_principal_assignment_update',
setter_name='begin_create_or_update',
)
g.custom_command(
'delete', 'kusto_database_principal_assignment_delete', supports_no_wait=True, confirmation=True
)
g.custom_wait_command(
'wait', 'kusto_database_principal_assignment_show')
with self.command_group(
'kusto managed-private-endpoint', kusto_managed_private_endpoint, client_factory=cf_managed_private_endpoint
) as g:
g.custom_command('list', 'kusto_managed_private_endpoint_list')
g.custom_show_command('show', 'kusto_managed_private_endpoint_show')
g.custom_command(
'create', 'kusto_managed_private_endpoint_create', supports_no_wait=True)
g.custom_command(
'update', 'kusto_managed_private_endpoint_update', supports_no_wait=True)
g.custom_command('delete', 'kusto_managed_private_endpoint_delete',
supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'kusto_managed_private_endpoint_show')
with self.command_group('kusto operation-result', kusto_operation_result, client_factory=cf_operation_result) as g:
g.custom_show_command('show', 'kusto_operation_result_show')
with self.command_group(
'kusto operation-result-location', kusto_operation_result_location, client_factory=cf_operation_result_location
) as g:
g.custom_show_command('show', 'kusto_operation_result_location_show')
with self.command_group(
'kusto private-endpoint-connection',
kusto_private_endpoint_connection,
client_factory=cf_private_endpoint_connection,
) as g:
g.custom_command('list', 'kusto_private_endpoint_connection_list')
g.custom_show_command('show', 'kusto_private_endpoint_connection_show')
g.custom_command(
'create', 'kusto_private_endpoint_connection_create', supports_no_wait=True)
g.generic_update_command(
'update',
supports_no_wait=True,
custom_func_name='kusto_private_endpoint_connection_update',
setter_name='begin_create_or_update',
)
g.custom_command('delete', 'kusto_private_endpoint_connection_delete',
supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'kusto_private_endpoint_connection_show')
with self.command_group(
'kusto private-link-resource', kusto_private_link_resource, client_factory=cf_private_link_resource
) as g:
g.custom_command('list', 'kusto_private_link_resource_list')
g.custom_show_command('show', 'kusto_private_link_resource_show')
with self.command_group('kusto script', kusto_script, client_factory=cf_script) as g:
g.custom_command('list', 'kusto_script_list')
g.custom_show_command('show', 'kusto_script_show')
g.custom_command('create', 'kusto_script_create',
supports_no_wait=True)
g.custom_command('update', 'kusto_script_update',
supports_no_wait=True)
g.custom_command('delete', 'kusto_script_delete',
supports_no_wait=True, confirmation=True)
g.custom_wait_command('wait', 'kusto_script_show')
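# Mark the top-level 'kusto' command group as experimental so the CLI flags these commands accordingly.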
with self.command_group('kusto', is_experimental=True):
pass
|
py | b409c9866200545a5119893d2d4700535c5eac18 | import time
from deepdiff import DeepDiff
import uuid
from .tuq import QueryTests
from collection.collections_stats import CollectionsStats
from couchbase_helper.cluster import Cluster
from lib.couchbase_helper.documentgenerator import SDKDataLoader
from membase.api.rest_client import RestConnection
class QueryArrayFlatteningTests(QueryTests):
def setUp(self):
super(QueryArrayFlatteningTests, self).setUp()
self.log.info("============== QueryArrayFlatteningTests setup has started ==============")
self.bucket_name = self.input.param("bucket", self.default_bucket_name)
self.active_resident_threshold = self.input.param("resident_ratio", 100)
self.kv_dataset = self.input.param("kv_dataset", "Hotel")
self.num_items = self.input.param("num_items", 10000)
self.expiry = self.input.param("expiry", 0)
self.explicit = self.input.param("explicit", False)
self.use_all = self.input.param("use_all", False)
self.use_unnest = self.input.param("use_unnest", False)
self.any_every = self.input.param("any_every", False)
self.rollback = self.input.param("rollback", False)
self.conn = RestConnection(self.master)
self.stat = CollectionsStats(self.master)
self.cbqpath = '{0}cbq -quiet -u {1} -p {2} -e=localhost:8093 '.format(self.path, self.username, self.password)
self.cluster = Cluster()
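# Populate the test bucket with the configured dataset before the flattening tests run.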
self.load_data()
self.log.info("============== QueryArrayFlatteningTests setup has completed ==============")
def suite_setUp(self):
super(QueryArrayFlatteningTests, self).suite_setUp()
self.log.info("============== QueryArrayFlatteningTests suite_setup has started ==============")
self.log.info("============== QueryArrayFlatteningTests suite_setup has completed ==============")
def tearDown(self):
self.log.info("============== QueryArrayFlatteningTests tearDown has started ==============")
self.log.info("============== QueryArrayFlatteningTests tearDown has completed ==============")
super(QueryArrayFlatteningTests, self).tearDown()
def suite_tearDown(self):
self.log.info("============== QueryArrayFlatteningTests suite_tearDown has started ==============")
self.log.info("============== QueryArrayFlatteningTests suite_tearDown has completed ==============")
super(QueryArrayFlatteningTests, self).suite_tearDown()
##############################################################################################
#
# Negative Tests
##############################################################################################
'''Flatten index needs at least one argument, try passing in no arguments'''
def test_flatten_negative(self):
try:
self.run_cbq_query(query="CREATE INDEX idx1 ON default(DISTINCT ARRAY FLATTEN_KEYS() FOR r IN reviews END,country,email)")
self.fail()
except Exception as ex:
self.assertTrue("Number of arguments to function flatten_keys must be between 1 and 32" in str(ex), "Exception is not what was expected, exception should have been a syntax error! Exception is {0}".format(str(ex)))
'''Multiple array keys are not allowed in one index, test this'''
def test_flatten_multiple_array_keys(self):
try:
self.run_cbq_query(
query="CREATE INDEX idx2 ON default(DISTINCT ARRAY(ALL ARRAY FLATTEN_KEYS(rting.Cleanliness,rting.Rooms) "
"FOR rting IN r.ratings END) FOR r IN reviews END, DISTINCT ARRAY flatten_keys(r.author) FOR r IN "
"reviews END, free_parking)")
self.fail()
except Exception as ex:
self.assertTrue("Multiple expressions with ALL are found. Only one array expression is supported per index." in str(ex),
"Exception is not what was expected. Exception is {0}".format(
str(ex)))
'''Every will not pick up the index'''
def test_flatten_every(self):
self.run_cbq_query(
query="create index idx1 on default(ALL ARRAY FLATTEN_KEYS(r.ratings.Rooms,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)")
# This EVERY query should not use the flatten index; ensure it falls back to the primary index
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default AS d WHERE EVERY r IN d.reviews SATISFIES r.ratings.Rooms = 3 and r.ratings.Cleanliness > 1 END AND free_parking = True")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected primary got {0}".format(
explain_results))
'''Unnest should not work unless array index is leading key'''
def test_flatten_unnest_non_leading(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`(`email`,(distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)))")
# The array is not the leading index key, so the unnest query should fall back to the primary index
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected primary got {0}".format(
explain_results))
'''Try to unnest an object'''
def test_flatten_unnest_object(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`(`email`,(distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)))")
# Unnesting an object should not pick up the flatten index; the query should fall back to the primary index
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r UNNEST r.ratings AS s WHERE s.Rooms > 1 AND s.Overall < 5")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected primary got {0}".format(
explain_results))
'''Use an unnest that doesn't use elements in the flatten, should ignore the index'''
def test_flatten_unnest_negative(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
# The unnest does not use any of the flattened fields, so the query should fall back to the primary index
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected primary got {0}".format(
explain_results))
'''Flatten index takes between 1-32 arguments, try with 33 arguments'''
def test_flatten_arguments(self):
try:
self.run_cbq_query(query="CREATE INDEX idx2 ON default(DISTINCT ARRAY FLATTEN_KEYS(r.field1,r.field1,"
"r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,"
"r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,"
"r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,"
"r.field1,r.field1,r.field1,r.field1) FOR r IN reviews END,country,email);")
self.fail()
except Exception as ex:
self.assertTrue("Number of arguments to function flatten_keys (near line 1, column 59) must be between 1 and 32"
in str(ex), "Exception is not what was expected, exception should have been a syntax error! "
"Exception is {0}".format(str(ex)))
'''We expect it not to pick up this index since it's not using any of the indexed fields'''
def test_flatten_no_fields(self):
self.run_cbq_query(
query="create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author) FOR r IN reviews END,country,email)")
query = "EXPLAIN SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.ratings.Cleanliness > 1 END"
if self.any_every:
query = query.replace("ANY", "ANY AND EVERY")
# The query uses none of the indexed fields, so it should fall back to the primary index
explain_results = self.run_cbq_query(query=query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected primary got {0}".format(
explain_results))
##############################################################################################
#
# General Tests
##############################################################################################
'''Flatten index takes between 1-32 arguments, try with 32 arguments'''
def test_flatten_max_arguments(self):
actual_results= self.run_cbq_query(query="CREATE INDEX idx2 ON default(DISTINCT ARRAY FLATTEN_KEYS(r.field1,r.field1,"
"r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,"
"r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,"
"r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,r.field1,"
"r.field1,r.field1,r.field1) FOR r IN reviews END,country,email);")
self.assertTrue(actual_results['status'] == 'success', "Index was not successfully created! {0}".format(actual_results))
'''Verify a basic query that uses flatten against primary index'''
def test_flatten_basic(self):
self.run_cbq_query(query="create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author) FOR r IN reviews END,country,email)")
if self.use_unnest:
query = "SELECT * FROM default AS d unnest reviews as r WHERE r.author LIKE 'M%'"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'M%'"
else:
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' END"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' END"
if self.any_every:
query = query.replace("ANY", "ANY AND EVERY")
primary_query = primary_query.replace("ANY", "ANY AND EVERY")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
'''Try the asc desc keywords in index creation'''
def test_flatten_asc_desc(self):
self.run_cbq_query(query="create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author ASC,r.ratings.Cleanliness DESC) FOR r IN reviews END, email, free_parking, country)")
if self.use_unnest:
query = "SELECT * FROM default AS d unnest reviews as r WHERE r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND free_parking = True AND country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND free_parking = True AND country is not null"
else:
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND country is not null"
if self.any_every:
query = query.replace("ANY", "ANY AND EVERY")
primary_query = primary_query.replace("ANY", "ANY AND EVERY")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
'''Test flatten key with all keyword instead of distinct'''
def test_flatten_all(self):
self.run_cbq_query(
query="create index idx1 on default(ALL ARRAY FLATTEN_KEYS(r.ratings.Rooms,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)")
if self.use_unnest:
query = "SELECT * FROM default AS d unnest reviews as r WHERE r.ratings.Rooms = 3 and r.ratings.Cleanliness > 1" \
"AND d.free_parking = True AND d.country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.ratings.Rooms = 3 and r.ratings.Cleanliness > 1" \
"AND d.free_parking = True AND d.country is not null"
else:
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.ratings.Rooms = 3 and r.ratings.Cleanliness > 1 END " \
"AND free_parking = True AND country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews " \
"SATISFIES r.ratings.Rooms = 3 and r.ratings.Cleanliness > 1 END AND free_parking = True " \
"AND country is not null"
if self.any_every:
query = query.replace("ANY", "ANY AND EVERY")
primary_query = primary_query.replace("ANY", "ANY AND EVERY")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
'''Test flatten key index with an array that contains distinct keyword and another array that contains all keyword'''
def test_flatten_all_distinct(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((ALL (ARRAY(DISTINCT (ARRAY flatten_keys(n,v) FOR n:v IN (`r`.`ratings`) END)) FOR `r` IN `reviews` END)))")
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
def test_flatten_all_all(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((ALL (ARRAY(ALL (ARRAY flatten_keys(n,v) FOR n:v IN (`r`.`ratings`) END)) FOR `r` IN `reviews` END)))")
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
def test_flatten_distinct_all(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((DISTINCT (ARRAY(ALL (ARRAY flatten_keys(n,v) FOR n:v IN (`r`.`ratings`) END)) FOR `r` IN `reviews` END)))")
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
'''Test flatten with array as non-leading key and some fields not used'''
def test_flatten_non_leading(self):
self.run_cbq_query(query="create index idx1 on default(country, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND country is not null")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND country is not null")
expected_results = self.run_cbq_query(query="SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND country is not null")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
def test_flatten_field_value(self):
self.run_cbq_query(query="create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND country is not null")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND country is not null")
expected_results = self.run_cbq_query(query="SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND country is not null")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Test flatten with array as leading key and some fields not used'''
def test_flatten_skip_keys_leading(self):
self.run_cbq_query(query="create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)")
if self.use_unnest:
query = "SELECT * FROM default AS d unnest reviews as r WHERE r.author LIKE 'M%' and r.ratings.Cleanliness > 1" \
"AND d.free_parking = True AND d.country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'M%' and r.ratings.Cleanliness > 1" \
"AND d.free_parking = True AND d.country is not null"
else:
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END " \
"AND free_parking = True AND country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' " \
"and r.ratings.Cleanliness > 1 END AND free_parking = True AND country is not null"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
'''Test what happens when you have an array with ALL/DISTINCT flatten_keys(v1,v2) but query contains any (v2,1)'''
def test_flatten_array_ordering(self):
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)"
if self.use_unnest:
query = "SELECT SUM( r.ratings.Cleanliness) FROM default AS d unnest reviews as r WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False AND d.email is not missing GROUP BY r.rating.Cleanliness,r.author"
primary_query = "SELECT SUM( r.ratings.Cleanliness) FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False AND d.email is not missing GROUP BY r.rating.Cleanliness,r.author"
else:
query = "SELECT free_parking, email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'N%' and r.ratings.Cleanliness = 3 END AND free_parking = True AND email is not missing GROUP BY free_parking, email"
primary_query = "SELECT free_parking, email FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'N%' and r.ratings.Cleanliness = 3 END AND free_parking = True AND email is not missing GROUP BY free_parking,email"
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
'''Test partial index'''
def test_flatten_partial_index(self):
self.run_cbq_query(
query="create index idx1 on default(ALL ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking) where free_parking = True")
if self.use_unnest:
query = "SELECT * FROM default AS d unnest reviews as r WHERE r.author LIKE 'M%' and r.ratings.Cleanliness > 1 " \
"AND d.free_parking = True AND d.country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'M%' and r.ratings.Cleanliness > 1 " \
"AND d.free_parking = True AND d.country is not null"
query2 = "SELECT * FROM default AS d unnest reviews as r WHERE r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND d.free_parking = False"
else:
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 " \
"END AND free_parking = True AND country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author " \
"LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND country is not null"
query2 = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = False"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
# Ensure partial index is not selected when it does not apply
explain_results = self.run_cbq_query(query="EXPLAIN " + query2)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
'''Test out using when clause in array index key, then use queries that fall inside and outside the when clause, equality and comparators'''
def test_flatten_when(self):
self.run_cbq_query(query="create index idx1 on default(country, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews when r.ratings.Cleanliness < 3 END, email, free_parking)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness < 2 END AND free_parking = True AND country is not null")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness < 2 END AND free_parking = True AND country is not null")
expected_results = self.run_cbq_query(query="SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness < 2 END AND free_parking = True AND country is not null")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
explain_results = self.run_cbq_query(query="EXPLAIN SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness > 3 END AND free_parking = True AND country is not null")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness > 3 END AND free_parking = True AND country is not null")
expected_results = self.run_cbq_query(query="SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness > 3 END AND free_parking = True AND country is not null")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
explain_results = self.run_cbq_query(query="EXPLAIN SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness = 4 END AND free_parking = True AND country is not null")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness = 4 END AND free_parking = True AND country is not null")
expected_results = self.run_cbq_query(query="SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness = 4 END AND free_parking = True AND country is not null")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Test array_flatten index with array key as leading key, index should be ignored when the when clause is false'''
def test_flatten_when_leading(self):
self.run_cbq_query(query="create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews when r.ratings.Cleanliness < 3 END, country, email, free_parking)")
if self.use_unnest:
query = "SELECT * FROM default AS d unnest reviews as r WHERE CONTAINS(r.author,'M') and " \
"r.ratings.Cleanliness < 2 AND d.free_parking = True AND d.country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE CONTAINS(r.author,'M') and " \
"r.ratings.Cleanliness < 2 AND d.free_parking = True AND d.country is not null"
query2 = "EXPLAIN SELECT * FROM default AS d unnest reviews as r WHERE CONTAINS(r.author,'M') and r.ratings.Cleanliness > 3 AND d.free_parking = True AND d.country is not null"
query3 = "EXPLAIN SELECT * FROM default AS d unnest reviews as r WHERE CONTAINS(r.author,'M') and r.ratings.Cleanliness = 4 AND d.free_parking = True AND d.country is not null"
else:
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness < 2 END AND free_parking = True AND country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness < 2 END AND free_parking = True AND country is not null"
query2 = "EXPLAIN SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness > 3 END AND free_parking = True AND country is not null"
query3 = "EXPLAIN SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES CONTAINS(r.author,'M') and r.ratings.Cleanliness = 4 END AND free_parking = True AND country is not null"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
# These two queries should not pick up the index
explain_results = self.run_cbq_query(query=query2)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
explain_results = self.run_cbq_query(query=query3)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
def test_flatten_advise(self):
if self.use_unnest:
query = "SELECT * FROM default AS d unnest reviews as r WHERE r.author LIKE 'M%' AND r.ratings.Cleanliness > 1 AND d.free_parking = TRUE AND d.country IS NOT NULL"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'M%' AND r.ratings.Cleanliness > 1 AND d.free_parking = TRUE AND d.country IS NOT NULL"
else:
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' AND r.ratings.Cleanliness > 1 END AND free_parking = TRUE AND country IS NOT NULL"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' AND r.ratings.Cleanliness > 1 END AND free_parking = TRUE AND country IS NOT NULL"
advise_results = self.run_cbq_query(query="ADVISE " + query)
self.assertTrue("FLATTEN_KEYS" in str(advise_results), "Advisor should've advised an index with flatten_keys but it did not, advisor output {0}".format(advise_results))
create_idx_statement = advise_results['results'][0]['advice']['adviseinfo']['recommended_indexes']['indexes'][0]['index_statement']
idx_name = advise_results['results'][0]['advice']['adviseinfo']['recommended_indexes']['indexes'][0]['index_statement'].split("INDEX")[1].split("ON")[0].strip()
self.run_cbq_query(query=create_idx_statement)
explain_results = self.run_cbq_query(
query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == idx_name,
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
def test_flatten_advise_equivalent(self):
self.run_cbq_query(query="create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author) FOR r IN reviews END,country,email)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT * FROM default "
"WHERE country = 'Norfolk Island' and email = '[email protected]' "
"AND ANY r IN reviews SATISFIES r.author = 'Martin Feest' END")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(explain_results))
advise_results = self.run_cbq_query(
query="ADVISE SELECT * FROM default WHERE country = 'Norfolk Island' and email = '[email protected]' "
"AND ANY r IN reviews SATISFIES r.author = 'Martin Feest' END")
self.assertTrue(advise_results['results'][0]['advice']['adviseinfo']['recommended_indexes'] == "No index recommendation at this time.", "There shouldn't be a recommended index! {0}".format(advise_results))
def test_flatten_unnest_any(self):
self.run_cbq_query(query="CREATE INDEX idx1 ON default(public_likes, DISTINCT ARRAY flatten_keys(r.ratings.Cleanliness,r.ratings.Rooms,r.author) FOR r IN reviews END, free_parking)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT * FROM default AS d UNNEST d.public_likes p WHERE p "
"LIKE 'R%' AND ANY r IN d.reviews SATISFIES r.author LIKE '%m' AND "
"(r.ratings.Cleanliness >= 1 OR r.ratings.Rooms <= 3) END AND d.free_parking = TRUE")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(explain_results))
query_results = self.run_cbq_query(query="SELECT * FROM default AS d UNNEST d.public_likes p WHERE p "
"LIKE 'R%' AND ANY r IN d.reviews SATISFIES r.author LIKE '%m' AND "
"(r.ratings.Cleanliness >= 1 OR r.ratings.Rooms <= 3) END AND d.free_parking = TRUE")
expected_results = self.run_cbq_query(query="SELECT * FROM default AS d USE INDEX (`#primary`) UNNEST d.public_likes p WHERE p "
"LIKE 'R%' AND ANY r IN d.reviews SATISFIES r.author LIKE '%m' AND "
"(r.ratings.Cleanliness >= 1 OR r.ratings.Rooms <= 3) END AND d.free_parking = TRUE")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
def test_flatten_or(self):
self.run_cbq_query(
query="CREATE INDEX idx1 ON default(public_likes, DISTINCT ARRAY flatten_keys(r.ratings.Cleanliness,r.ratings.Rooms,r.author) FOR r IN reviews END, free_parking)")
query = "SELECT * FROM default AS d UNNEST d.public_likes p WHERE p " \
"LIKE 'R%' AND ANY r IN d.reviews SATISFIES r.author LIKE '%m' AND " \
"(r.ratings.Cleanliness >= 1 OR r.ratings.Rooms <= 3) END OR d.free_parking = TRUE"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) UNNEST d.public_likes p WHERE p " \
"LIKE 'R%' AND ANY r IN d.reviews SATISFIES r.author LIKE '%m' AND " \
"(r.ratings.Cleanliness >= 1 OR r.ratings.Rooms <= 3) END OR d.free_parking = TRUE"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
def test_flatten_prepared(self):
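# Clear out any existing prepared statements so the count of p1 entries checked below is not skewed by earlier runs.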
self.run_cbq_query(query="delete from system:prepareds")
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.date,r.ratings.Overall) FOR r IN reviews END, email)"
if self.use_unnest:
prepare_query = "PREPARE p1 AS SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
query = "SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
primary_query = "SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
else:
prepare_query = "PREPARE p1 AS SELECT * FROM default d WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
query = "SELECT * FROM default d WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
primary_query = "SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
self.run_cbq_query(query=create_query)
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(explain_results))
prepare_results = self.run_cbq_query(query=prepare_query)
self.assertTrue(prepare_results['status'] == "success")
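# Give the prepared statement time to appear in system:prepareds on all query nodes; two p1 entries are expected below.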
time.sleep(5)
node_prepared_name = self.run_cbq_query(query='select * from system:prepareds where name = "p1"')
prepareds = self.run_cbq_query(query="select * from system:prepareds")
self.assertTrue(node_prepared_name['metrics']['resultCount'] == 2, "There should be two entries for p1, check prepareds {0}".format(prepareds))
execute_results = self.run_cbq_query(query="execute p1")
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(execute_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
def test_flatten_cte(self):
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.date,r.ratings.Overall) FOR r IN reviews END, email)"
if self.use_unnest:
query = "with emails as (SELECT raw d.email FROM default d UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing) " \
"select * from default d UNNEST d.reviews as r where r.ratings.Overall BETWEEN 1 and 3 and r.date is not missing AND d.email in emails"
primary_query = "with emails as (SELECT raw d.email FROM default d UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing) " \
"select * from default d USE INDEX (`#primary`) UNNEST d.reviews as r where r.ratings.Overall BETWEEN 1 and 3 and r.date is not missing AND d.email in emails"
else:
query = "WITH emails as (SELECT raw email FROM default d WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END) " \
"SELECT * FROM default d WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END and email in emails"
primary_query = "WITH emails as (SELECT raw email FROM default d WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END) " \
"SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END and email in emails"
self.run_cbq_query(query=create_query)
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
def test_flatten_cte_conflict(self):
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.date,r.ratings.Overall) FOR r IN reviews END, email)"
if self.use_unnest:
query = "with emails as (SELECT raw d.email FROM default d) " \
"select * from default d UNNEST d.reviews as emails where emails.ratings.Overall BETWEEN 1 and 3 and emails.date is not missing AND d.email in emails"
else:
query = "WITH emails as (SELECT raw email FROM default d ) " \
"SELECT * FROM default d WHERE ANY emails in reviews SATISFIES emails.ratings.Overall BETWEEN 1 and 3 AND emails.date is not missing END and email in emails"
self.run_cbq_query(query=create_query)
try:
self.run_cbq_query(query=query)
self.fail()
except Exception as e:
if self.use_unnest:
self.assertTrue("Duplicate UNNEST alias emails" in str(e), "The error is incorrect check the error {0}".format(str(e)))
else:
self.assertTrue("Duplicate variable emails" in str(e), "The error is incorrect check the error {0}".format(str(e)))
def test_flatten_alias_keyspace_collision(self):
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.date,r.ratings.Overall) FOR r IN reviews END, email)"
if self.use_unnest:
query = "SELECT * FROM default d UNNEST d.reviews AS d WHERE d.ratings.Overall BETWEEN 1 and 3 AND d.date is not missing"
primary_query = "SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS d WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
else:
query = "SELECT email FROM default d WHERE ANY d in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
primary_query = "SELECT email FROM default d USE INDEX (`#primary`) WHERE ANY d in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
self.run_cbq_query(query=create_query)
try:
self.run_cbq_query(query=query)
self.fail()
except Exception as e:
if self.use_unnest:
self.assertTrue("Duplicate UNNEST alias d" in str(e), "The error is incorrect check the error {0}".format(str(e)))
else:
self.assertTrue("Duplicate variable d" in str(e), "The error is incorrect check the error {0}".format(str(e)))
'''We expect the query to pick up the array with the keys flattened'''
def test_flatten_index_selection(self):
self.run_cbq_query(query="create index idx1 on default(country, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)")
self.run_cbq_query(query="create index idx2 on default(country, DISTINCT ARRAY [r.author,r.ratings.Cleanliness] FOR r IN reviews END, email, free_parking)")
if self.use_unnest:
query = "SELECT * FROM default AS d unnest reviews as r WHERE r.author LIKE 'M%' AND r.ratings.Cleanliness = 3 AND free_parking = TRUE AND country IS NOT NULL"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'M%' AND r.ratings.Cleanliness = 3 AND free_parking = TRUE AND country IS NOT NULL"
else:
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' AND r.ratings.Cleanliness = 3 END AND free_parking = TRUE AND country IS NOT NULL"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' AND r.ratings.Cleanliness = 3 END AND free_parking = TRUE AND country IS NOT NULL"
explain_results = self.run_cbq_query(query="EXPLAIN " + query )
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == "idx1",
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
'''Test array index that contains some of the fields indexed and some fields that are in the array and not in the index'''
def test_flatten_partial_elements(self):
self.run_cbq_query(query = "CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query = "EXPLAIN SELECT * FROM default d WHERE ANY r in d.reviews satisfies r.ratings.Rooms > 1 AND r.ratings.Cleanliness < 5 AND CONTAINS(r.author,'F') END")
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
query_results = self.run_cbq_query(query = "SELECT * FROM default d WHERE ANY r in d.reviews satisfies r.ratings.Rooms > 1 AND r.ratings.Cleanliness < 5 AND CONTAINS(r.author,'F') END")
expected_results = self.run_cbq_query(query = "SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY r in d.reviews satisfies r.ratings.Rooms > 1 AND r.ratings.Cleanliness < 5 AND CONTAINS(r.author,'F') END")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Test array index that contains some of the fields indexed and some fields that are in the array and not in the index'''
def test_flatten_unnest_partial_elements(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Cleanliness < 5 AND CONTAINS(r.author,'F')")
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Cleanliness < 5 AND CONTAINS(r.author,'F')")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Cleanliness < 5 AND CONTAINS(r.author,'F')")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
def test_flatten_named_params(self):
self.run_cbq_query(
query="create index idx1 on default(ALL ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking) where free_parking = True")
if self.use_unnest:
query = "SELECT * FROM default AS d unnest reviews as r WHERE r.author LIKE $author_name and r.ratings.Cleanliness > $cleaning_score " \
"AND d.free_parking = True AND d.country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE $author_name and r.ratings.Cleanliness > $cleaning_score " \
"AND d.free_parking = True AND d.country is not null"
query2 = "SELECT * FROM default AS d unnest reviews as r WHERE r.author LIKE $author_name and r.ratings.Cleanliness > $cleaning_score AND d.free_parking = False"
else:
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE $author_name and r.ratings.Cleanliness > $cleaning_score " \
"END AND free_parking = True AND country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author " \
"LIKE $author_name and r.ratings.Cleanliness > $cleaning_score END AND free_parking = True AND country is not null"
query2 = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE $author_name and r.ratings.Cleanliness > $cleaning_score END AND free_parking = False"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query, query_params={'$author_name': "M%", "$cleaning_score": 1})
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query=query, query_params={'$author_name': "M%", "$cleaning_score": 1})
expected_results = self.run_cbq_query(query=primary_query, query_params={'$author_name': "M%", "$cleaning_score": 1})
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
# Ensure partial index is not selected when it does not apply
explain_results = self.run_cbq_query(query="EXPLAIN " + query2, query_params={'$author_name': "M%", "$cleaning_score": 1})
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected #primary got {0}".format(
explain_results))
def test_flatten_ansi_joins(self):
self.load_travel_sample()
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Overall) for r in reviews END,email)"
query = "select * from `travel-sample`.inventory.hotel t INNER JOIN default d ON (ANY r in d.reviews satisfies " \
"r.author like 'M%' and r.ratings.Cleanliness > 1 END AND t.country = d.country AND ANY s in t.reviews " \
"SATISFIES s.author like 'M%' and s.ratings.Cleanliness > 1 END) "
self.run_cbq_query(query=create_query)
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(explain_results))
ansi_results = self.run_cbq_query(query=query)
self.assertTrue(ansi_results['status'] == 'success',
"Join did not occur successfully! {0}".format(ansi_results))
def test_flatten_positional_params(self):
self.run_cbq_query(
query="create index idx1 on default(ALL ARRAY FLATTEN_KEYS(r.ratings.Overall,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking) where free_parking = True")
if self.use_unnest:
query = "SELECT * FROM default AS d unnest reviews as r WHERE r.ratings.Overall < $1 and r.ratings.Cleanliness > $2 " \
"AND d.free_parking = True AND d.country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.ratings.Overall < $1 and r.ratings.Cleanliness > $2 " \
"AND d.free_parking = True AND d.country is not null"
query2 = "SELECT * FROM default AS d unnest reviews as r WHERE r.ratings.Overall < $1 and r.ratings.Cleanliness > $2 AND d.free_parking = False"
else:
query = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.ratings.Overall < $1 and r.ratings.Cleanliness > $2 " \
"END AND free_parking = True AND country is not null"
primary_query = "SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.ratings.Overall < $1 " \
" and r.ratings.Cleanliness > $2 END AND free_parking = True AND country is not null"
query2 = "SELECT * FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.ratings.Overall < $1 and r.ratings.Cleanliness > $2 END AND free_parking = False"
args= 'args=[5,1]'
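# Positional parameters ($1, $2) are passed as 'args' through the query REST endpoint via curl instead of through run_cbq_query.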
curl_output = self.shell.execute_command("{0} -u Administrator:password {1}:{2}/query/service -d 'statement=EXPLAIN {3}&{4}'".format(self.curl_path, self.master.ip, self.n1ql_port, query, args))
explain_results = self.convert_list_to_json(curl_output[0])
# Ensure the query is actually using the flatten index instead of primary
#explain_results = self.run_cbq_query(query="EXPLAIN " + query, query_params={'args': ["M%", 1]})
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
curl_output = self.shell.execute_command("{0} -u Administrator:password {1}:{2}/query/service -d 'statement={3}&{4}'".format(self.curl_path, self.master.ip, self.n1ql_port, query, args))
query_results = self.convert_list_to_json(curl_output[0])
curl_output = self.shell.execute_command("{0} -u Administrator:password {1}:{2}/query/service -d 'statement={3}&{4}'".format(self.curl_path, self.master.ip, self.n1ql_port, primary_query, args))
expected_results = self.convert_list_to_json(curl_output[0])
#query_results = self.run_cbq_query(query=query ,query_params={'args': ["M%", 1]})
#expected_results = self.run_cbq_query(query=primary_query, query_params={'args': ["M%", 1]})
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
# Ensure partial index is not selected when it does not apply
curl_output = self.shell.execute_command("{0} -u Administrator:password {1}:{2}/query/service -d statement='EXPLAIN {3}&{4}'".format(self.curl_path, self.master.ip, self.n1ql_port, query2, args))
explain_results = self.convert_list_to_json(curl_output[0])
#explain_results = self.run_cbq_query(query="EXPLAIN " + query2, query_params={'args': ["M%", 1]})
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected #primary got {0}".format(
explain_results))
##############################################################################################
#
# Query Context
##############################################################################################
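# query_context supplies the namespace/bucket.scope prefix used to resolve partial keyspace names (e.g. 'hotel') in the statements below.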
def test_flatten_query_context_namespace_bucket_scope(self):
self.load_travel_sample()
create_query = "create index idx1 on hotel(DISTINCT ARRAY FLATTEN_KEYS(r.date,r.ratings.Overall) FOR r IN reviews END, email)"
if self.use_unnest:
query = "SELECT * FROM `travel-sample`.inventory.hotel d UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
primary_query = "SELECT * FROM `travel-sample`.inventory.hotel d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
else:
query = "SELECT * FROM `travel-sample`.inventory.hotel d WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
primary_query = "SELECT * FROM `travel-sample`.inventory.hotel d USE INDEX (`#primary`) WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
self.run_cbq_query(query=create_query, query_context='default:`travel-sample`.inventory')
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(explain_results))
self.run_cbq_query(query="CREATE PRIMARY INDEX ON `travel-sample`.inventory.hotel")
self.compare_against_primary(query, primary_query)
def test_flatten_query_context_semicolon_bucket_scope(self):
self.load_travel_sample()
create_query = "create index idx1 on hotel(DISTINCT ARRAY FLATTEN_KEYS(r.date,r.ratings.Overall) FOR r IN reviews END, email)"
if self.use_unnest:
query = "SELECT * FROM `travel-sample`.inventory.hotel d UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
primary_query = "SELECT * FROM `travel-sample`.inventory.hotel d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
else:
query = "SELECT * FROM `travel-sample`.inventory.hotel d WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
primary_query = "SELECT * FROM `travel-sample`.inventory.hotel d USE INDEX (`#primary`) WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
self.run_cbq_query(query=create_query, query_context=':`travel-sample`.inventory')
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(explain_results))
self.run_cbq_query(query="CREATE PRIMARY INDEX ON `travel-sample`.inventory.hotel")
self.compare_against_primary(query, primary_query)
def test_flatten_query_context_namespace(self):
self.load_travel_sample()
create_query = "create index idx1 on `travel-sample`.inventory.hotel(DISTINCT ARRAY FLATTEN_KEYS(r.date,r.ratings.Overall) FOR r IN reviews END, email) "
if self.use_unnest:
query = "SELECT * FROM `travel-sample`.inventory.hotel d UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
primary_query = "SELECT * FROM `travel-sample`.inventory.hotel d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
else:
query = "SELECT * FROM `travel-sample`.inventory.hotel d WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
primary_query = "SELECT * FROM `travel-sample`.inventory.hotel d USE INDEX (`#primary`) WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
self.run_cbq_query(query=create_query, query_context='default:')
explain_results = self.run_cbq_query(query="EXPLAIN " + query, query_context='default:')
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(explain_results))
self.run_cbq_query(query="CREATE PRIMARY INDEX ON `travel-sample`.inventory.hotel")
self.compare_against_primary(query, primary_query)
def test_flatten_query_context(self):
self.load_travel_sample()
create_query = "create index idx1 on hotel(DISTINCT ARRAY FLATTEN_KEYS(r.date,r.ratings.Overall) FOR r IN reviews END, email)"
if self.use_unnest:
query = "SELECT * FROM hotel d UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
primary_query = "SELECT * FROM hotel d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing"
else:
query = "SELECT * FROM hotel d WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
primary_query = "SELECT * FROM hotel d USE INDEX (`#primary`) WHERE ANY r in reviews SATISFIES r.ratings.Overall BETWEEN 1 and 3 AND r.date is not missing END"
self.run_cbq_query(query=create_query, query_context='`travel-sample`.inventory')
explain_results = self.run_cbq_query(query="EXPLAIN " + query, query_context='`travel-sample`.inventory')
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(explain_results))
self.run_cbq_query(query="CREATE PRIMARY INDEX ON `travel-sample`.inventory.hotel")
query_results = self.run_cbq_query(query=query, query_context='`travel-sample`.inventory')
expected_results = self.run_cbq_query(query=primary_query, query_context='`travel-sample`.inventory')
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
##############################################################################################
#
# Partitioning
##############################################################################################
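# Partitioned indexes distribute index entries across partitions via HASH of the partition keys; these tests verify the flatten index is still selected and returns the same results as a primary scan.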
'''Index has partitions'''
def test_flatten_partitioning(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end))) PARTITION BY HASH (META().id)")
if self.use_unnest:
query = "SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F')"
primary_query = "SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F')"
else:
query = "SELECT * FROM default d WHERE ANY r in reviews SATISFIES r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') END"
primary_query = "SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY r in reviews SATISFIES r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') END"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Index is partitioned by a non-array field in the index'''
def test_flatten_partitioning_field(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`(email, (distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end))) PARTITION BY HASH (email)")
query = "SELECT * FROM default d WHERE ANY r in reivews SATISFIES r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') END and d.email LIKE 'I%'"
primary_query = "SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY r in reivews SATISFIES r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') END and d.email LIKE 'I%'"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Index with the array key leading, partitioned by a non-array field in the index'''
def test_flatten_partitioning_field_array_leading(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)), email) PARTITION BY HASH (email)")
if self.use_unnest:
query = "SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') and d.email LIKE 'I%'"
primary_query = "SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') and d.email LIKE 'I%'"
else:
query = "SELECT * FROM default d WHERE ANY r in reviews SATISFIES r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') END and d.email LIKE 'I%'"
primary_query = "SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY r in reviews SATISFIES r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') END and d.email LIKE 'I%'"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Index is partitioned by an array field in the index'''
def test_flatten_partitioning_array_field(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end))) PARTITION BY HASH (reviews)")
if self.use_unnest:
query = "SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') and d.email LIKE 'I%'"
primary_query = "SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') and d.email LIKE 'I%'"
else:
query = "SELECT * FROM default d WHERE ANY r in reviews SATISFIES r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') END and d.email LIKE 'I%'"
primary_query = "SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY r in reviews SATISFIES r.ratings.Cleanliness BETWEEN 1 and 3 AND CONTAINS(r.author,'F') END and d.email LIKE 'I%'"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
##############################################################################################
#
# PUSHDOWN
##############################################################################################
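# Aggregate/GROUP BY pushdown is verified by checking for 'index_group_aggs' in the plan, meaning the grouping and aggregation are evaluated by the index scan rather than the query engine.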
'''Pushdown will work on the leading key of the index'''
def test_flatten_groupby_pushdown_leading(self):
create_query = "create index idx1 on default(email, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END,free_parking)"
self.run_cbq_query(query=create_query)
query = "SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND email is not missing GROUP BY email"
primary_query = "SELECT email FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND email is not missing GROUP BY email"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results), "Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
'''Pushdown will work when the array key is the leading key of the index'''
def test_flatten_groupby_pushdown_array_leading(self):
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)"
query ="SELECT r.author FROM default AS d unnest reviews as r WHERE r.author LIKE 'M%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = True AND d.email is not missing GROUP BY r.author"
primary_query = "SELECT r.author FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'M%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = True AND d.email is not missing GROUP BY r.author;"
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results), "Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
'''Pushdown will work on a GROUP BY clause whose key order differs from the index key order'''
def test_flatten_groupby_pushdown_order_unnest(self):
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)"
query = "SELECT r.author,d.email FROM default AS d unnest reviews as r WHERE r.author LIKE 'M%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = True AND d.email is not missing GROUP BY d.email,r.author"
primary_query = "SELECT r.author, d.email FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'M%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = True AND d.email is not missing GROUP BY d.email,r.author;"
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results),
"Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
def test_flatten_groupby_pushdown_order(self):
create_query = "create index idx1 on default(email, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END,free_parking)"
self.run_cbq_query(query=create_query)
query = "SELECT free_parking,email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND email is not missing GROUP BY free_parking,email"
primary_query = "SELECT free_parking,email FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND email is not missing GROUP BY free_parking,email"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results), "Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
'''Pushdown will work on non-leading keys'''
def test_flatten_groupby_pushdown_nonleading(self):
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)"
self.run_cbq_query(query=create_query)
if self.use_unnest:
query = "SELECT d.email FROM default AS d unnest reviews as r WHERE r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND d.free_parking = True GROUP BY d.email"
primary_query = "SELECT d.email FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND d.free_parking = True GROUP BY d.email"
else:
query = "SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END " \
"AND free_parking = True GROUP BY email"
primary_query = "SELECT email FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' " \
"and r.ratings.Cleanliness > 1 END AND free_parking = True GROUP BY email"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results), "Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
'''Pushdown with limit and offset works with group by on leading keys'''
def test_flatten_groupby_pushdown_limit_offset(self):
create_query = "create index idx1 on default(email, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END,free_parking)"
query = "SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND email is not missing GROUP BY email ORDER BY email LIMIT 10 OFFSET 5"
primary_query = "SELECT email FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True AND email is not missing GROUP BY email ORDER BY email LIMIT 10 OFFSET 5"
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results),
"Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
'''Aggregate pushdown can work with no groupby clause'''
def test_flatten_aggregate_pushdown_no_group(self):
self.load_travel_sample()
create_query = "create index idx1 on `travel-sample`.inventory.hotel(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)"
if self.use_unnest:
query = "SELECT SUM(r.ratings.Cleanliness) FROM `travel-sample`.inventory.hotel AS d unnest reviews as r WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False AND d.email is not missing"
primary_query = "SELECT SUM(r.ratings.Cleanliness) FROM `travel-sample`.inventory.hotel AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False AND d.email is not missing"
else:
query = "SELECT COUNT(email), SUM(free_parking) FROM `travel-sample`.inventory.hotel AS d WHERE ANY r IN d.reviews SATISFIES r.author = 'Nella Ratke' and r.ratings.Cleanliness = 3 END AND free_parking = True AND email is not missing"
primary_query = "SELECT COUNT(email), SUM(free_parking) FROM `travel-sample`.inventory.hotel AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author = 'Nella Ratke' and r.ratings.Cleanliness = 3 END AND free_parking = True AND email is not missing"
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.run_cbq_query(query="CREATE PRIMARY INDEX ON `travel-sample`.inventory.hotel")
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results), "Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
'''Aggregate pushdown should work with LETTING and HAVING'''
def test_flatten_aggregate_pushdown_letting_having(self):
self.load_travel_sample()
create_query = "create index idx1 on `travel-sample`.inventory.hotel(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)"
if self.use_unnest:
query = "SELECT SUM(r.ratings.Cleanliness) FROM `travel-sample`.inventory.hotel AS d unnest reviews as r " \
"WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False " \
"AND d.email is not missing GROUP BY r.ratings.Cleanliness LETTING min_cleanliness = 5 HAVING COUNT(r.ratings.Cleanliness) > min_cleanliness"
primary_query = "SELECT SUM(r.ratings.Cleanliness) FROM `travel-sample`.inventory.hotel AS d USE INDEX (`#primary`) unnest reviews as r " \
"WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND " \
"d.free_parking = False AND d.email is not missing GROUP BY r.ratings.Cleanliness LETTING min_cleanliness = 5 HAVING COUNT(r.ratings.Cleanliness) > min_cleanliness"
else:
query = "SELECT MAX(email), MIN(email), AVG( free_parking) FROM `travel-sample`.inventory.hotel AS d WHERE ANY r IN d.reviews SATISFIES r.author = 'Nella Ratke' and r.ratings.Cleanliness = 3 END AND free_parking = True AND email is not missing GROUP BY email, free_parking LETTING avg_parking = 1 HAVING AVG(free_parking) > avg_parking"
primary_query = "SELECT MAX(email), MIN(email), AVG( free_parking) FROM `travel-sample`.inventory.hotel AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author = 'Nella Ratke' and r.ratings.Cleanliness = 3 END AND free_parking = True AND email is not missing GROUP BY email, free_parking LETTING avg_parking = 1 HAVING AVG(free_parking) > avg_parking"
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.run_cbq_query(query="CREATE PRIMARY INDEX ON `travel-sample`.inventory.hotel")
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results),
"Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
def test_flatten_aggregate_pushdown_distinct(self):
self.load_travel_sample()
create_query = "create index idx1 on `travel-sample`.inventory.hotel(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)"
if self.use_unnest:
query = "SELECT SUM( DISTINCT r.ratings.Cleanliness) FROM `travel-sample`.inventory.hotel AS d unnest reviews as r WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False AND d.email is not missing GROUP BY r.author"
primary_query = "SELECT SUM( DISTINCT r.ratings.Cleanliness) FROM `travel-sample`.inventory.hotel AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False AND d.email is not missing GROUP BY r.author"
else:
query = "SELECT COUNT(DISTINCT email), SUM( DISTINCT free_parking) FROM `travel-sample`.inventory.hotel AS d WHERE ANY r IN d.reviews SATISFIES r.author = 'Nella Ratke' and r.ratings.Cleanliness = 3 END AND free_parking = True AND email is not missing GROUP BY email, free_parking"
primary_query = "SELECT COUNT(DISTINCT email), SUM(DISTINCT free_parking) FROM `travel-sample`.inventory.hotel AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author = 'Nella Ratke' and r.ratings.Cleanliness = 3 END AND free_parking = True AND email is not missing GROUP BY email, free_parking"
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.run_cbq_query(query="CREATE PRIMARY INDEX ON `travel-sample`.inventory.hotel")
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results), "Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
def test_flatten_aggregate_avg_unnest(self):
self.load_travel_sample()
create_query = "create index idx1 on `travel-sample`.inventory.hotel(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)"
query = "SELECT AVG( r.ratings.Cleanliness) FROM `travel-sample`.inventory.hotel AS d unnest reviews as r WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False AND d.email is not missing GROUP BY r.ratings.Cleanliness"
primary_query = "SELECT AVG( r.ratings.Cleanliness) FROM `travel-sample`.inventory.hotel AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False AND d.email is not missing GROUP BY r.ratings.Cleanliness"
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.run_cbq_query(query="CREATE PRIMARY INDEX ON `travel-sample`.inventory.hotel")
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results), "Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
def test_flatten_aggregate_min_max_unnest(self):
self.load_travel_sample()
create_query = "create index idx1 on `travel-sample`.inventory.hotel(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)"
query = "SELECT MIN(r.ratings.Cleanliness), MAX(r.ratings.Cleanliness) FROM `travel-sample`.inventory.hotel AS d unnest reviews as r WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False AND d.email is not missing GROUP BY r.ratings.Cleanliness"
primary_query = "SELECT MIN(r.ratings.Cleanliness), MAX(r.ratings.Cleanliness) FROM `travel-sample`.inventory.hotel AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE 'N%' and r.author is not missing and r.ratings.Cleanliness > 1 AND d.free_parking = False AND d.email is not missing GROUP BY r.ratings.Cleanliness"
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.run_cbq_query(query="CREATE PRIMARY INDEX ON `travel-sample`.inventory.hotel")
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("index_group_aggs" in str(explain_results), "Index should be pushing down but it isn't. Please check the plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
##############################################################################################
#
# GROUPBY/ORDERBY/LIMIT/OFFSET
##############################################################################################
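# These tests verify that GROUP BY, ORDER BY, LIMIT and OFFSET queries pick the flatten_keys index and return the same rows (and ordering) as the primary index.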
def test_flatten_groupby_non_array(self):
self.run_cbq_query(
query="CREATE INDEX idx1 ON default(DISTINCT ARRAY flatten_keys(r.ratings.Cleanliness,r.ratings.Rooms,r.author) FOR r IN reviews END, email)")
if self.use_unnest:
query = "SELECT d.email FROM default AS d unnest reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) GROUP BY d.email ORDER BY d.email"
primary_query = "SELECT d.email FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) GROUP BY d.email ORDER BY d.email"
else:
query = "SELECT d.email FROM default AS d WHERE ANY r IN d.reviews " \
"SATISFIES r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END " \
"GROUP BY d.email ORDER BY d.email"
primary_query = "SELECT d.email FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews " \
"SATISFIES r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END " \
"GROUP BY d.email ORDER BY d.email"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'])
if diffs:
self.assertTrue(False, diffs)
def test_flatten_groupby_array(self):
self.run_cbq_query(
query="CREATE INDEX idx1 ON default(DISTINCT ARRAY flatten_keys(r.ratings.Cleanliness,r.ratings.Rooms,r.author) FOR r IN reviews END, country)")
if self.use_unnest:
query = "SELECT r.author FROM default AS d unnest reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) GROUP BY r.author ORDER BY r.author"
primary_query = "SELECT r.author FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) GROUP BY r.author ORDER BY r.author"
else:
query = "SELECT d.reviews FROM default AS d WHERE ANY r IN d.reviews " \
"SATISFIES r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END " \
"GROUP BY d.reviews ORDER BY d.reviews"
primary_query = "SELECT d.reviews FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews " \
"SATISFIES r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END " \
"GROUP BY d.reviews ORDER BY d.reviews"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'])
if diffs:
self.assertTrue(False, diffs)
def test_flatten_groupby_limit(self):
self.run_cbq_query(
query="CREATE INDEX idx1 ON default(DISTINCT ARRAY flatten_keys(r.ratings.Cleanliness,r.ratings.Rooms,r.author) FOR r IN reviews END, country)")
if self.use_unnest:
query = "SELECT d.country, count(r.author) FROM default AS d unnest reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) GROUP BY r.author,d.country ORDER BY r.author,d.country LIMIT 10"
primary_query = "SELECT d.country, count(r.author) FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) GROUP BY r.author,d.country ORDER BY r.author,d.country LIMIT 10"
else:
query = "SELECT d.country, COUNT(d.reviews) FROM default AS d WHERE ANY r IN d.reviews " \
"SATISFIES r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END " \
"GROUP BY d.reviews,d.country ORDER BY d.reviews,d.country LIMIT 10"
primary_query = "SELECT d.country, COUNT(d.reviews) FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews " \
"SATISFIES r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END " \
"GROUP BY d.reviews,d.country ORDER BY d.reviews,d.country LIMIT 10"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'])
if diffs:
self.assertTrue(False, diffs)
def test_flatten_groupby_limit_offset(self):
self.run_cbq_query(
query="CREATE INDEX idx1 ON default(DISTINCT ARRAY flatten_keys(r.ratings.Cleanliness,r.ratings.Rooms,r.author) FOR r IN reviews END, country)")
if self.use_unnest:
query = "SELECT d.country, count(r.author) FROM default AS d unnest reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) GROUP BY r.author,d.country ORDER BY r.author,d.country LIMIT 10 OFFSET 5"
primary_query = "SELECT d.country, count(r.author) FROM default AS d USE INDEX (`#primary`) unnest reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) GROUP BY r.author,d.country ORDER BY r.author,d.country LIMIT 10 OFFSET 5"
else:
query = "SELECT d.country, COUNT(d.reviews) FROM default AS d WHERE ANY r IN d.reviews " \
"SATISFIES r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END " \
"GROUP BY d.reviews,d.country ORDER BY d.reviews,d.country LIMIT 10 OFFSET 5"
primary_query = "SELECT d.country, COUNT(d.reviews) FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews " \
"SATISFIES r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END " \
"GROUP BY d.reviews,d.country ORDER BY d.reviews,d.country LIMIT 10 OFFSET 5"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'])
if diffs:
self.assertTrue(False, diffs)
def test_flatten_orderby_limit(self):
self.run_cbq_query(
query="CREATE INDEX idx1 ON default(DISTINCT ARRAY flatten_keys(r.ratings.Cleanliness,r.ratings.Rooms,r.author) FOR r IN reviews END, country)")
if self.use_unnest:
query = "SELECT d.country, d.reviews FROM default AS d UNNEST reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) ORDER BY d.reviews,d.country LIMIT 10"
primary_query = "SELECT d.country, d.reviews FROM default AS d USE INDEX (`#primary`) UNNEST reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) ORDER BY d.reviews,d.country LIMIT 10"
else:
query = "SELECT d.country, d.reviews FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author " \
"LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END ORDER BY d.reviews,d.country LIMIT 10"
primary_query = "SELECT d.country, d.reviews FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author " \
"LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END ORDER BY d.reviews,d.country LIMIT 10"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'])
if diffs:
self.assertTrue(False, diffs)
def test_flatten_orderby_limit_offset(self):
self.run_cbq_query(
query="CREATE INDEX idx1 ON default(DISTINCT ARRAY flatten_keys(r.ratings.Cleanliness,r.ratings.Rooms,r.author) FOR r IN reviews END, country)")
if self.use_unnest:
query = "SELECT d.country, d.reviews FROM default AS d UNNEST reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) ORDER BY d.reviews,d.country LIMIT 10 OFFSET 5"
primary_query = "SELECT d.country, d.reviews FROM default AS d USE INDEX (`#primary`) UNNEST reviews as r WHERE r.author LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) ORDER BY d.reviews,d.country LIMIT 10 OFFSET 5"
else:
query = "SELECT d.country, d.reviews FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author " \
"LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END ORDER BY d.reviews,d.country LIMIT 10 OFFSET 5"
primary_query = "SELECT d.country, d.reviews FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author " \
"LIKE '%m' AND (r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3) END ORDER BY d.reviews,d.country LIMIT 10 OFFSET 5"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),"The query should be using idx1, check explain results {0}".format(explain_results))
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'])
if diffs:
self.assertTrue(False, diffs)
##############################################################################################
#
# DML
##############################################################################################
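# These tests verify DML statements (INSERT, UPSERT, UPDATE, DELETE, MERGE) whose predicates or source SELECTs are served by flatten_keys indexes.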
'''Test an INSERT whose source SELECT uses an index with flatten_keys'''
def test_flatten_insert(self):
self.load_travel_sample()
self.run_cbq_query(
query="CREATE INDEX idx1 ON `default`((ALL (ARRAY(ALL (ARRAY flatten_keys(n,v) FOR n:v IN (`r`.`ratings`) END)) FOR `r` IN `reviews` END)))")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN INSERT INTO `travel-sample`.inventory.landmark (KEY foo, VALUE bar) "
"SELECT META(doc).id AS foo, doc AS bar FROM `default` AS doc WHERE ANY r IN doc.reviews SATISFIES "
"ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
insert_results = self.run_cbq_query(query="INSERT INTO `travel-sample`.inventory.landmark (KEY foo, VALUE bar) "
"SELECT META(doc).id AS foo, doc AS bar FROM `default` AS doc WHERE "
"ANY r IN doc.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' "
"AND v = 3 END END")
self.assertTrue(insert_results['status'] == 'success', "Index was not successfully created! {0}".format(insert_results))
query_results = self.run_cbq_query(
query="SELECT d.country, d.address, d.free_parking, d.city, d.type, d.url, d.phone, d.price, d.avg_rating, d.name, d.email FROM `travel-sample`.inventory.landmark AS d WHERE ANY r IN d.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END ")
old_results = self.run_cbq_query(query="SELECT d.country, d.address, d.free_parking, d.city, d.type, d.url, d.phone, d.price, d.avg_rating, d.name, d.email FROM default AS d WHERE ANY r IN d.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END ")
diffs = DeepDiff(query_results['results'], old_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Test an UPSERT whose source SELECT uses an index with flatten_keys'''
def test_flatten_upsert(self):
self.load_travel_sample()
self.run_cbq_query(
query="CREATE INDEX idx1 ON `default`((ALL (ARRAY(ALL (ARRAY flatten_keys(n,v) FOR n:v IN (`r`.`ratings`) END)) FOR `r` IN `reviews` END)))")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN UPSERT INTO `travel-sample`.inventory.landmark (KEY foo, VALUE bar) "
"SELECT META(doc).id AS foo, doc AS bar FROM `default` AS doc WHERE ANY r IN doc.reviews SATISFIES "
"ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
insert_results = self.run_cbq_query(query="UPSERT INTO `travel-sample`.inventory.landmark (KEY foo, VALUE bar) "
"SELECT META(doc).id AS foo, doc AS bar FROM `default` AS doc WHERE "
"ANY r IN doc.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' "
"AND v = 3 END END")
self.assertTrue(insert_results['status'] == 'success',
"Upsert was not successful! {0}".format(insert_results))
query_results = self.run_cbq_query(
query="SELECT d.country, d.address, d.free_parking, d.city, d.type, d.url, d.phone, d.price, d.avg_rating, d.name, d.email FROM `travel-sample`.inventory.landmark AS d WHERE ANY r IN d.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END")
old_results = self.run_cbq_query(query="SELECT d.country, d.address, d.free_parking, d.city, d.type, d.url, d.phone, d.price, d.avg_rating, d.name, d.email FROM default AS d WHERE ANY r IN d.reviews "
"SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END")
diffs = DeepDiff(query_results['results'], old_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
update_results = self.run_cbq_query(query="UPSERT INTO `travel-sample`.inventory.landmark (KEY foo, VALUE bar) "
"SELECT META(doc).id AS foo, doc AS bar FROM `default` AS doc WHERE "
"ANY r IN doc.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' "
"AND v = 3 END END")
self.assertTrue(update_results['status'] == 'success',
"Upsert was not successful! {0}".format(update_results))
query_results = self.run_cbq_query(
query="SELECT d.country, d.address, d.free_parking, d.city, d.type, d.url, d.phone, d.price, d.avg_rating, d.name, d.email FROM `travel-sample`.inventory.landmark AS d WHERE ANY r IN d.reviews SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END")
old_results = self.run_cbq_query(query="SELECT d.country, d.address, d.free_parking, d.city, d.type, d.url, d.phone, d.price, d.avg_rating, d.name, d.email FROM default AS d WHERE ANY r IN d.reviews "
"SATISFIES ANY n:v IN r.ratings SATISFIES n = 'Cleanliness' AND v = 3 END END")
diffs = DeepDiff(query_results['results'], old_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
def test_flatten_update(self):
self.load_travel_sample()
self.run_cbq_query(query="create index idx1 on default(country, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, avg_rating)")
update_results = self.run_cbq_query(
query="UPDATE `travel-sample`.inventory.airport "
"SET foo = 9 WHERE country IN "
"(SELECT RAW country FROM default d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' AND r.ratings.Cleanliness = 3 END "
"AND avg_rating <= 3 AND country IS NOT NULL)")
self.assertTrue(update_results['status'] == "success")
mutation_count = update_results['metrics']['mutationCount']
verify_results = self.run_cbq_query(query="select foo from `travel-sample`.inventory.airport where foo = 9")
self.assertEqual(verify_results['metrics']['resultCount'], mutation_count, "Results mismatched, here are the verify_results {0}".format(verify_results))
def test_flatten_delete(self):
self.run_cbq_query(
query="CREATE INDEX idx1 ON default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, free_parking) WHERE ANY r IN default.reviews SATISFIES r.author LIKE 'M%' END")
explain_results = self.run_cbq_query(query="explain delete from default d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness = 3 END AND free_parking = True")
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
self.asserTrue("covers" in str(explain_results), "This query should be covered by the index but it is not: plan {0}",format(explain_results))
delete_results = self.run_cbq_query(query="delete from default d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness = 3 END AND free_parking = True")
self.assertTrue(delete_results['status'] == 'success',
"Delete was not successful! {0}".format(delete_results))
# Ensure no documents remain that fit the criteria
primary_results = self.run_cbq_query(query="SELECT * FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness = 3 END AND free_parking = True")
self.assertTrue(primary_results['metrics']['resultCount'] == 0 ,"There are results! But there should be no results for this query {0}".format(primary_results))
def test_flatten_ansi_merge(self):
self.load_travel_sample()
query = "MERGE INTO default d USING `travel-sample`.inventory.hotel t ON t.country = d.country and any r in d.reviews satisfies r.author like 'M%' and r.ratings.Overall > 3 END WHEN MATCHED THEN DELETE WHERE d.free_parking = true"
self.run_cbq_query(
query="create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Overall) for r in reviews END,country)")
explain_results = self.run_cbq_query(query="explain " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1 check explain results {0}".format(
explain_results))
merge_results = self.run_cbq_query(query=query)
self.assertTrue(merge_results['status'] == 'success',
"Merge did not occur successfully! {0}".format(merge_results))
##############################################################################################
#
# Covering Tests
##############################################################################################
    '''Test an array index covering'''
def test_flatten_cover(self):
if self.explicit:
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking, reviews)"
else:
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email, free_parking)"
if self.use_all:
create_query = create_query.replace("DISTINCT", "ALL")
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
self.assertTrue("covers" in str(explain_results), "The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
expected_results = self.run_cbq_query(query="SELECT email FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Test an array index covering when the flattened array keys are not the leading index key'''
def test_flatten_cover_no_leading(self):
create_query = "create index idx1 on default(free_parking, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email)"
if self.use_all:
create_query = create_query.replace("DISTINCT", "ALL")
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
self.assertTrue("covers" in str(explain_results), "The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
expected_results = self.run_cbq_query(query="SELECT email FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Test covering using the flatten array index when the query has no ANY clause'''
def test_flatten_cover_no_any(self):
create_query = "create index idx1 on default(free_parking, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, email)"
if self.use_all:
create_query = create_query.replace("DISTINCT", "ALL")
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT email FROM default AS d WHERE free_parking = True")
self.assertTrue("covers" in str(explain_results), "The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT email FROM default AS d WHERE free_parking = True")
expected_results = self.run_cbq_query(query="SELECT email FROM default AS d USE INDEX (`#primary`) WHERE free_parking = True")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Reviews must be explicitly included in the index keys for an ANY AND EVERY query to be covered; this index omits it, so the query should not be covered'''
def test_flatten_any_and_every_non_cover(self):
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, phone, free_parking)"
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT phone FROM default AS d WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
self.assertTrue("covers" not in str(explain_results), "The index is covering, it shouldn't be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT phone FROM default AS d WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
expected_results = self.run_cbq_query(query="SELECT phone FROM default AS d USE INDEX (`#primary`) WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Reviews must be explicitly included in the index keys for an ANY AND EVERY query to be covered'''
def test_flatten_any_and_every_cover(self):
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, address, free_parking, reviews)"
if self.use_all:
create_query = create_query.replace("DISTINCT", "ALL")
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT address FROM default AS d WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
self.assertTrue("covers" in str(explain_results), "The index is covering, it shouldn't be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT address FROM default AS d WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
expected_results = self.run_cbq_query(query="SELECT address FROM default AS d USE INDEX (`#primary`) WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Reviews must be explicitly included in the index keys for an ANY AND EVERY query to be covered, even when the flattened keys are not leading'''
def test_flatten_any_and_every_cover_non_leading(self):
self.run_cbq_query(query="create index idx1 on default(free_parking, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, address, reviews)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT address FROM default AS d WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
self.assertTrue("covers" in str(explain_results), "The index is covering, it shouldn't be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT address FROM default AS d WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
expected_results = self.run_cbq_query(query="SELECT address FROM default AS d USE INDEX (`#primary`) WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''With the whole reviews array as the leading index key the flatten index cannot serve the query; it should fall back to the primary index and not be covered'''
def test_flatten_any_and_every_cover_array_leading(self):
self.run_cbq_query(query="create index idx1 on default(reviews, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END, address, free_parking)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT address FROM default AS d WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
self.assertTrue("covers" not in str(explain_results), "The index is covering, it shouldn't be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == '#primary',
"The correct index is not being used or the plan is different than expected! Expected primary got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT address FROM default AS d WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
expected_results = self.run_cbq_query(query="SELECT address FROM default AS d USE INDEX (`#primary`) WHERE ANY AND EVERY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Test array index covering with a WHEN clause; the query filters on a field that is not an explicit index key, but because it appears in the WHEN clause the query should still be covered'''
def test_flatten_when_cover(self):
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews WHEN r.ratings.Rooms < 3 END, email, free_parking)"
if self.use_all:
create_query = create_query.replace("DISTINCT", "ALL")
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3 END AND free_parking = True")
self.assertTrue("covers" in str(explain_results), "The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3 END AND free_parking = True")
expected_results = self.run_cbq_query(query="SELECT email FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3 END AND free_parking = True")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Test array index covering with a WHEN clause and non-leading flattened keys; the field referenced only in the WHEN clause should still allow the query to be covered'''
def test_flatten_when_cover_non_leading(self):
create_query = "create index idx1 on default(free_parking, DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews WHEN r.ratings.Rooms < 3 END, email)"
if self.use_all:
create_query = create_query.replace("DISTINCT", "ALL")
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3 END AND free_parking = True")
self.assertTrue("covers" in str(explain_results), "The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT email FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3 END AND free_parking = True")
expected_results = self.run_cbq_query(query="SELECT email FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 AND r.ratings.Rooms < 3 END AND free_parking = True")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Test covering with a partial array_flattening index'''
def test_flatten_partial_cover(self):
create_query = "create index idx1 on default(ALL ARRAY FLATTEN_KEYS(r.author,r.ratings.Cleanliness) FOR r IN reviews END,url) where free_parking = True"
if self.use_all:
create_query = create_query.replace("DISTINCT", "ALL")
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT url FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
self.assertTrue("covers" in str(explain_results), "The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT url FROM default AS d WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
expected_results = self.run_cbq_query(
query="SELECT url FROM default AS d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.author LIKE 'M%' and r.ratings.Cleanliness > 1 END AND free_parking = True")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''With distinct keyword, unnest can't cover, with all keyword, unnest can cover'''
def test_flatten_unnest_cover(self):
if self.explicit:
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Rooms) FOR r IN reviews END, email, country, reviews)"
else:
create_query = "create index idx1 on default(DISTINCT ARRAY FLATTEN_KEYS(r.author,r.ratings.Rooms) FOR r IN reviews END, email, country)"
if self.use_all:
create_query = create_query.replace("DISTINCT", "ALL")
self.run_cbq_query(query=create_query)
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN SELECT d.email FROM default AS d UNNEST reviews AS rev "
"WHERE rev.author LIKE 'M%' AND rev.ratings.Rooms > 1 AND d.country IS NOT NULL")
if self.use_all:
self.assertTrue("covers" in str(explain_results), "The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue("filter_covers" in str(explain_results), "The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
else:
self.assertTrue("covers" not in str(explain_results), "The index is not covering, it should be. Check plan {0}".format(explain_results))
self.assertTrue(explain_results['results'][0]['plan']['~children'][0]['scan']['index'] == 'idx1',
"The correct index is not being used or the plan is different than expected! Expected idx1 got {0}".format(
explain_results))
query_results = self.run_cbq_query(query="SELECT d.email FROM default AS d UNNEST reviews AS rev "
"WHERE rev.author LIKE 'M%' AND rev.ratings.Rooms > 1 AND d.country IS NOT NULL")
expected_results = self.run_cbq_query(query="SELECT d.email FROM default AS d USE INDEX (`#primary`) UNNEST reviews AS rev "
"WHERE rev.author LIKE 'M%' AND rev.ratings.Rooms > 1 AND d.country IS NOT NULL")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
##############################################################################################
#
# Intersect/Union/Except
##############################################################################################
'''Test intersect scan between two any/any and every arrays'''
def test_flatten_intersect_any(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
self.run_cbq_query(query="CREATE INDEX `idx2` ON `default`((distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)),`email`)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d WHERE ANY r IN d.reviews SATISFIES r.ratings.Cleanliness > 1 "
"AND r.author LIKE 'M%' END AND ANY s IN d.reviews SATISFIES s.ratings.Rooms > 3 AND s.ratings.Overall > 1 END")
self.assertTrue("IntersectScan" in str(explain_results), "The query should be using an instersect scan, check explain results {0}".format(explain_results))
self.assertTrue("idx1" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
self.assertTrue("idx2" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d WHERE ANY r IN d.reviews SATISFIES r.ratings.Cleanliness > 1 "
"AND r.author LIKE 'M%' END AND ANY s IN d.reviews SATISFIES s.ratings.Rooms > 3 AND s.ratings.Overall > 1 END")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.ratings.Cleanliness > 1 "
"AND r.author LIKE 'M%' END AND ANY s IN d.reviews SATISFIES s.ratings.Rooms > 3 AND s.ratings.Overall > 1 END")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Test intersect scan between an any clause and an unnest clause'''
def test_flatten_intersect_any_unnest(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
self.run_cbq_query(query="CREATE INDEX `idx2` ON `default`((distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)),`email`)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" AND ANY s IN d.reviews SATISFIES r.ratings.Rooms > 1 AND r.ratings.Overall < 5 END")
self.assertTrue("IntersectScan" in str(explain_results), "The query should be using an instersect scan, check explain results {0}".format(explain_results))
self.assertTrue("Unnest" in str(explain_results), "The query should be using an unnest, check explain results {0}".format(explain_results))
self.assertTrue("idx1" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
self.assertTrue("idx2" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" AND ANY s IN d.reviews SATISFIES r.ratings.Rooms > 1 AND r.ratings.Overall < 5 END")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" AND ANY s IN d.reviews SATISFIES r.ratings.Rooms > 1 AND r.ratings.Overall < 5 END")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Test intersect scan between two unnest clauses'''
def test_flatten_intersect_unnest(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
self.run_cbq_query(query="CREATE INDEX `idx2` ON `default`((distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)),`email`)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" AND r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
self.assertTrue("IntersectScan" in str(explain_results), "The query should be using an instersect scan, check explain results {0}".format(explain_results))
self.assertTrue("Unnest" in str(explain_results), "The query should be using an unnest, check explain results {0}".format(explain_results))
self.assertTrue("idx1" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
self.assertTrue("idx2" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" AND r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" AND r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Test union all scan between two any/any and every arrays'''
def test_flatten_union_any(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
self.run_cbq_query(query="CREATE INDEX `idx2` ON `default`((distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)),`email`)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d WHERE ANY r IN d.reviews SATISFIES r.ratings.Cleanliness > 1 "
"AND r.author LIKE 'M%' END UNION SELECT * FROM default d WHERE ANY s IN d.reviews SATISFIES s.ratings.Rooms > 3 AND s.ratings.Overall > 1 END")
self.assertTrue("UnionAll" in str(explain_results), "The query should be using an instersect scan, check explain results {0}".format(explain_results))
self.assertTrue("idx1" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
self.assertTrue("idx2" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d WHERE ANY r IN d.reviews SATISFIES r.ratings.Cleanliness > 1 "
"AND r.author LIKE 'M%' END UNION SELECT * FROM default d WHERE ANY s IN d.reviews SATISFIES s.ratings.Rooms > 3 AND s.ratings.Overall > 1 END")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.ratings.Cleanliness > 1 "
"AND r.author LIKE 'M%' END UNION SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY s IN d.reviews SATISFIES s.ratings.Rooms > 3 AND s.ratings.Overall > 1 END")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
'''Test union all scan between an any clause and an unnest clause'''
def test_flatten_union_any_unnest(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
self.run_cbq_query(query="CREATE INDEX `idx2` ON `default`((distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)),`email`)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" UNION SELECT * FROM default d WHERE ANY s IN d.reviews SATISFIES s.ratings.Rooms > 1 AND s.ratings.Overall < 5 END")
self.assertTrue("UnionAll" in str(explain_results), "The query should be using an unionall scan, check explain results {0}".format(explain_results))
self.assertTrue("Unnest" in str(explain_results), "The query should be using an unnest, check explain results {0}".format(explain_results))
self.assertTrue("idx1" in str(explain_results),
"The query should be using an union all scan between idx1 and idx2, check explain results {0}".format(
explain_results))
self.assertTrue("idx2" in str(explain_results),
"The query should be using an union all scan between idx1 and idx2, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" UNION SELECT * FROM default d WHERE ANY s IN d.reviews SATISFIES s.ratings.Rooms > 1 AND s.ratings.Overall < 5 END")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" UNION SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY s IN d.reviews SATISFIES s.ratings.Rooms > 1 AND s.ratings.Overall < 5 END")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Test union all scan between two unnest clauses'''
def test_flatten_union_unnest(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
self.run_cbq_query(query="CREATE INDEX `idx2` ON `default`((distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)),`email`)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%' "
"UNION SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
self.assertTrue("UnionAll" in str(explain_results), "The query should be using an instersect scan, check explain results {0}".format(explain_results))
self.assertTrue("idx1" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
self.assertTrue("idx2" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%' "
"UNION SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%' "
"UNION SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Test except all scan between two unnest clauses'''
def test_flatten_except_unnest(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
self.run_cbq_query(query="CREATE INDEX `idx2` ON `default`((distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)),`email`)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%' "
"EXCEPT SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
self.assertTrue("ExceptAll" in str(explain_results), "The query should be using an instersect scan, check explain results {0}".format(explain_results))
self.assertTrue("idx1" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
self.assertTrue("idx2" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%' "
"EXCEPT SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%' "
"EXCEPT SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Test intersect all scan between two any/any and every arrays'''
def test_flatten_intersectall_any(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
self.run_cbq_query(query="CREATE INDEX `idx2` ON `default`((distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)),`email`)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d WHERE ANY r IN d.reviews SATISFIES r.ratings.Cleanliness > 1 "
"AND r.author LIKE 'M%' END INTERSECT SELECT * FROM default d WHERE ANY s IN d.reviews SATISFIES s.ratings.Rooms > 3 AND s.ratings.Overall > 1 END")
self.assertTrue("IntersectAll" in str(explain_results), "The query should be using an instersect scan, check explain results {0}".format(explain_results))
self.assertTrue("idx1" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
self.assertTrue("idx2" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d WHERE ANY r IN d.reviews SATISFIES r.ratings.Cleanliness > 1 "
"AND r.author LIKE 'M%' END INTERSECT SELECT * FROM default d WHERE ANY s IN d.reviews SATISFIES s.ratings.Rooms > 3 AND s.ratings.Overall > 1 END")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY r IN d.reviews SATISFIES r.ratings.Cleanliness > 1 "
"AND r.author LIKE 'M%' END INTERSECT SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY s IN d.reviews SATISFIES s.ratings.Rooms > 3 AND s.ratings.Overall > 1 END")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Test intersect all scan between an any clause and an unnest clause'''
def test_flatten_intersectall_any_unnest(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
self.run_cbq_query(query="CREATE INDEX `idx2` ON `default`((distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)),`email`)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" INTERSECT SELECT * FROM default d WHERE ANY s IN d.reviews SATISFIES r.ratings.Rooms > 1 AND r.ratings.Overall < 5 END")
self.assertTrue("IntersectAll" in str(explain_results), "The query should be using an instersect scan, check explain results {0}".format(explain_results))
self.assertTrue("Unnest" in str(explain_results), "The query should be using an unnest, check explain results {0}".format(explain_results))
self.assertTrue("idx1" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
self.assertTrue("idx2" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" INTERSECT SELECT * FROM default d WHERE ANY s IN d.reviews SATISFIES r.ratings.Rooms > 1 AND r.ratings.Overall < 5 END")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%'"
" INTERSECT SELECT * FROM default d USE INDEX (`#primary`) WHERE ANY s IN d.reviews SATISFIES r.ratings.Rooms > 1 AND r.ratings.Overall < 5 END")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
    '''Test intersect all scan between two unnest clauses'''
def test_flatten_intersectall_unnest(self):
self.run_cbq_query(
query="CREATE INDEX `idx1` ON `default`((distinct (array flatten_keys((`r`.`author`), ((`r`.`ratings`).`Cleanliness`)) for `r` in `reviews` end)))")
self.run_cbq_query(query="CREATE INDEX `idx2` ON `default`((distinct (array flatten_keys(((`r`.`ratings`).`Rooms`), ((`r`.`ratings`).`Overall`)) for `r` in `reviews` end)),`email`)")
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(
query="EXPLAIN SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%' "
"INTERSECT SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
self.assertTrue("IntersectAll" in str(explain_results), "The query should be using an instersect scan, check explain results {0}".format(explain_results))
self.assertTrue("idx1" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
self.assertTrue("idx2" in str(explain_results),
"The query should be using an instersect scan between idx1 and idx2, check explain results {0}".format(
explain_results))
query_results = self.run_cbq_query(
query="SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%' "
"INTERSECT SELECT * FROM default d UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
expected_results = self.run_cbq_query(
query="SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Cleanliness = 1 AND r.author LIKE 'M%' "
"INTERSECT SELECT * FROM default d USE INDEX (`#primary`) UNNEST d.reviews AS r WHERE r.ratings.Rooms > 1 AND r.ratings.Overall < 5")
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
##############################################################################################
#
# nested tests
##############################################################################################
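    '''Test that a query over a triple-nested array (chapters -> pages -> sections) is covered by the flattened index'''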
def test_flatten_triple_nested_covered(self):
self.load_nested()
self.run_cbq_query(query="CREATE INDEX idx1 ON default( ALL ARRAY(ALL ARRAY( ALL ARRAY FLATTEN_KEYS(sec.num,pg.description,ch.name) FOR sec IN pg.sections END) FOR pg IN ch.pages END) FOR ch IN chapters END, year, chapters, `type`)")
if self.use_unnest:
query = "SELECT d.year, d.`type` FROM default d UNNEST chapters AS ch UNNEST ch.pages AS pg " \
"UNNEST pg.sections AS sec WHERE d.`type` = 'book' AND ch.name = 'chapter 1' AND pg.description LIKE 'page%' AND sec.num = 1"
primary_query = "SELECT d.year, d.`type` FROM default d USE INDEX (`#primary`) UNNEST chapters AS ch UNNEST ch.pages AS pg " \
"UNNEST pg.sections AS sec WHERE d.`type` = 'book' AND ch.name = 'chapter 1' AND pg.description LIKE 'page%' AND sec.num = 1"
else:
query = "SELECT year, `type` FROM default d WHERE `type` = 'book' AND ANY ch IN chapters SATISFIES ANY pg IN ch.pages " \
"SATISFIES ANY sec IN pg.sections SATISFIES ch.name = 'chapter 1' AND pg.description LIKE 'page%' AND sec.num = 1 END END END"
primary_query = "SELECT year, `type` FROM default d USE INDEX (`#primary`) WHERE `type` = 'book' AND ANY ch IN chapters SATISFIES ANY pg IN ch.pages " \
"SATISFIES ANY sec IN pg.sections SATISFIES ch.name = 'chapter 1' AND pg.description LIKE 'page%' AND sec.num = 1 END END END"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(
explain_results))
self.assertTrue("covers" in str(explain_results),
"The index is not covering, it should be. Check plan {0}".format(explain_results))
self.compare_against_primary(query, primary_query)
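    '''Test a triple-nested query where an OR on the document type should force the primary index'''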
def test_flatten_triple_nested_or(self):
self.load_nested()
self.run_cbq_query(query="CREATE INDEX idx1 ON default( ALL ARRAY(ALL ARRAY( ALL ARRAY FLATTEN_KEYS(sec.num,pg.description,ch.name) FOR sec IN pg.sections END) FOR pg IN ch.pages END) FOR ch IN chapters END, year, chapters, `type`)")
if self.use_unnest:
query = "SELECT d.year, d.`type` FROM default d UNNEST chapters AS ch UNNEST ch.pages AS pg " \
"UNNEST pg.sections AS sec WHERE d.`type` = 'book' OR (ch.name = 'chapter 1' AND pg.description LIKE 'page%' AND sec.num = 1)"
primary_query = "SELECT d.year, d.`type` FROM default d USE INDEX (`#primary`) UNNEST chapters AS ch UNNEST ch.pages AS pg " \
"UNNEST pg.sections AS sec WHERE d.`type` = 'book' OR (ch.name = 'chapter 1' AND pg.description LIKE 'page%' AND sec.num = 1)"
else:
query = "SELECT year, `type` FROM default d WHERE `type` = 'book' OR (ANY ch IN chapters SATISFIES ANY pg IN ch.pages " \
"SATISFIES ANY sec IN pg.sections SATISFIES ch.name = 'chapter 1' AND pg.description LIKE 'page%' AND sec.num = 1 END END END)"
primary_query = "SELECT year, `type` FROM default d USE INDEX (`#primary`) WHERE `type` = 'book' OR (ANY ch IN chapters SATISFIES ANY pg IN ch.pages " \
"SATISFIES ANY sec IN pg.sections SATISFIES ch.name = 'chapter 1' AND pg.description LIKE 'page%' AND sec.num = 1 END END END)"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("#primary" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
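    '''Test a triple-nested query whose predicates are spread across multiple nesting levels; the flattened index should still be used'''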
def test_flatten_triple_nested_multiple_levels(self):
self.load_nested()
self.run_cbq_query(
query="CREATE INDEX idx1 ON default( ALL ARRAY(ALL ARRAY( ALL ARRAY FLATTEN_KEYS(sec.num,pg.description,ch.name) FOR sec IN pg.sections END) FOR pg IN ch.pages END) FOR ch IN chapters END, year, chapters, `type`)")
query = "SELECT year, `type` FROM default d WHERE `type` = 'book' AND ANY ch IN chapters SATISFIES ANY pg IN ch.pages " \
"SATISFIES ANY sec IN pg.sections SATISFIES ch.name = 'chapter 1' AND sec.num = 1 END AND pg.description LIKE 'page%' END END"
primary_query = "SELECT year, `type` FROM default d USE INDEX (`#primary`) WHERE `type` = 'book' AND ANY ch IN chapters SATISFIES ANY pg IN ch.pages " \
"SATISFIES ANY sec IN pg.sections SATISFIES ch.name = 'chapter 1' AND sec.num = 1 END AND pg.description LIKE 'page%' END END"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("idx1" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
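    '''Test a query that mixes UNNEST for the outer arrays with an ANY clause for the innermost array; the plan should fall back to the primary index'''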
def test_flatten_triple_nested_unnest_any_mix(self):
self.load_nested()
self.run_cbq_query(
query="CREATE INDEX idx1 ON default( ALL ARRAY(ALL ARRAY( ALL ARRAY FLATTEN_KEYS(sec.num,pg.description,ch.name) FOR sec IN pg.sections END) FOR pg IN ch.pages END) FOR ch IN chapters END, year, chapters, `type`)")
query = "SELECT d.year, d.`type` FROM default d UNNEST chapters AS ch UNNEST ch.pages AS pg WHERE d.`type` = 'book' " \
"AND ch.name = 'chapter 1' AND pg.description LIKE 'page%' AND ANY sec IN pg.sections SATISFIES sec.num = 1 END"
primary_query = "SELECT d.year, d.`type` FROM default d USE INDEX (`#primary`) UNNEST chapters AS ch UNNEST ch.pages AS pg WHERE d.`type` = 'book' " \
"AND ch.name = 'chapter 1' AND pg.description LIKE 'page%' AND ANY sec IN pg.sections SATISFIES sec.num = 1 END"
# Ensure the query is actually using the flatten index instead of primary
explain_results = self.run_cbq_query(query="EXPLAIN " + query)
self.assertTrue("#primary" in str(explain_results),
"The query should be using idx1, check explain results {0}".format(
explain_results))
self.compare_against_primary(query, primary_query)
##############################################################################################
#
# Helper
##############################################################################################
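    '''Delete and recreate the buckets, create primary indexes, and load generated documents until the active resident threshold is reached'''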
def load_data(self, num_extra_buckets=0):
self.conn.delete_all_buckets()
time.sleep(5)
self.conn.create_bucket(bucket="default", ramQuotaMB=256, proxyPort=11220, storageBackend="magma", replicaNumber=0)
for i in range(0, num_extra_buckets):
self.conn.create_bucket(bucket="bucket{0}".format(i), ramQuotaMB=256, proxyPort=11220, storageBackend="magma", replicaNumber=0)
self.run_cbq_query("CREATE PRIMARY INDEX on bucket{0}".format(i))
self.run_cbq_query("CREATE PRIMARY INDEX on default")
self.buckets = self.conn.get_buckets()
self.query_buckets = self.buckets
self.gen_create = SDKDataLoader(num_ops=self.num_items)
for bucket in self.buckets:
self.cluster.async_load_gen_docs_till_dgm(server=self.master,
active_resident_threshold=self.active_resident_threshold,
bucket=bucket,
scope=None, collection=None,
exp=self.expiry,
value_size=self.value_size, timeout_mins=60,
java_sdk_client=self.java_sdk_client, kv_dataset=self.kv_dataset)
for bkt in self.buckets:
print(self.stat.get_collection_stats(bkt))
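    '''Upsert deeply nested book documents (chapters -> pages -> sections -> paragraphs) used by the nested tests'''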
def load_nested(self):
upsert1 = "UPSERT INTO default VALUES ('book1', { 'type':'book', 'author': 'James', 'rev': 2, 'year': 2020, 'name': 'book 1', 'description': 'book 1 description', 'isbn': 1, 'chapters': [ {'num': 1, 'name': 'chapter 1', 'description': 'chapter 1 description', 'pages' : [ { 'num':1, 'name':'page 1', 'description': 'page 1 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':2, 'name':'page 2', 'description': 'page 2 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':3, 'name':'page 3', 'description': 'page 3 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] } ] }, {'num': 2, 'name': 'chapter 2', 'description': 'chapter 2 description', 'pages' 
: [ { 'num':1, 'name':'page 1', 'description': 'page 1 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':2, 'name':'page 2', 'description': 'page 2 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':3, 'name':'page 3', 'description': 'page 3 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] } ] }, {'num': 3, 'name': 'chapter 3', 'description': 'chapter 3 description', 'pages' : [ { 'num':1, 'name':'page 1', 'description': 'page 1 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 
'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':2, 'name':'page 2', 'description': 'page 2 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':3, 'name':'page 3', 'description': 'page 3 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] } ] } ] })"
upsert2 = "UPSERT INTO default VALUES ('book2', { 'type':'book', 'author': 'Mark', 'rev': 3, 'year': 2021, 'name': 'book 2', 'description': 'book 2 description', 'isbn': 2, 'chapters': [ {'num': 1, 'name': 'chapter 1', 'description': 'chapter 1 description', 'pages' : [ { 'num':1, 'name':'page 1', 'description': 'page 1 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':2, 'name':'page 2', 'description': 'page 2 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':3, 'name':'page 3', 'description': 'page 3 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] } ] }, {'num': 2, 'name': 'chapter 2', 'description': 'chapter 2 description', 'pages' : 
[ { 'num':1, 'name':'page 1', 'description': 'page 1 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':2, 'name':'page 2', 'description': 'page 2 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':3, 'name':'page 3', 'description': 'page 3 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] } ] }, {'num': 3, 'name': 'chapter 3', 'description': 'chapter 3 description', 'pages' : [ { 'num':1, 'name':'page 1', 'description': 'page 1 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 
'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':2, 'name':'page 2', 'description': 'page 2 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':3, 'name':'page 3', 'description': 'page 3 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] } ] } ] })"
upsert3 = "UPSERT INTO default VALUES ('book3', { 'type':'book', 'author': 'Chris', 'rev': 1, 'year': 2019, 'name': 'book 3', 'description': 'book 3 description', 'isbn': 3, 'chapters': [ {'num': 1, 'name': 'chapter 1', 'description': 'chapter 1 description', 'pages' : [ { 'num':1, 'name':'page 1', 'description': 'page 1 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':2, 'name':'page 2', 'description': 'page 2 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':3, 'name':'page 3', 'description': 'page 3 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] } ] }, {'num': 2, 'name': 'chapter 2', 'description': 'chapter 2 description', 'pages' 
: [ { 'num':1, 'name':'page 1', 'description': 'page 1 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':2, 'name':'page 2', 'description': 'page 2 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':3, 'name':'page 3', 'description': 'page 3 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] } ] }, {'num': 3, 'name': 'chapter 3', 'description': 'chapter 3 description', 'pages' : [ { 'num':1, 'name':'page 1', 'description': 'page 1 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 
'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':2, 'name':'page 2', 'description': 'page 2 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] }, { 'num':3, 'name':'page 3', 'description': 'page 3 description', 'sections':[ { 'num':1, 'name':'section 1', 'description': 'section 1 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':2, 'name':'section 2', 'description': 'section 2 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] }, { 'num':3, 'name':'section 3', 'description': 'section 3 description', 'paragraphs': [ { 'num': 1, 'name': 'paragraph 1', 'description': 'paragraph 1 description' }, { 'num': 2, 'name': 'paragraph 2', 'description': 'paragraph 2 description' }, { 'num': 3, 'name': 'paragraph 3', 'description': 'paragraph 3 description' } ] } ] } ] } ] })"
self.run_cbq_query(query = upsert1)
self.run_cbq_query(query = upsert2)
self.run_cbq_query(query = upsert3)
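# Helper below: run a query and its primary-index equivalent, then diff the two result sets (order-insensitive) and fail on any difference.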
def compare_against_primary(self, query="", primary_query=""):
query_results = self.run_cbq_query(query=query)
expected_results = self.run_cbq_query(query=primary_query)
diffs = DeepDiff(query_results['results'], expected_results['results'], ignore_order=True)
if diffs:
self.fail(diffs)
def load_travel_sample(self):
self.rest.load_sample("travel-sample")
time.sleep(60)
self.wait_for_all_indexes_online() |
py | b409c9daae8873c565af6787caa8f93690534355 | from .clc import Clc
from .cld import Cld
from .cli import Cli
from .clv import Clv
from .sec import Sec
from .sed import Sed
from .sei import Sei
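# These re-exports cover the processor status-flag instructions: clear carry/decimal/interrupt-disable/overflow (CLC, CLD, CLI, CLV) and set carry/decimal/interrupt-disable (SEC, SED, SEI).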
|
py | b409cad31c836eb06907cdf6be450700f05f7fb3 | # ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware
# Han Cai, Ligeng Zhu, Song Han
# International Conference on Learning Representations (ICLR), 2019.
import argparse
from search.models import MNISTRunConfig, TumorRunConfig, ImagenetRunConfig
from search.nas_manager import *
from search.models.super_nets.super_proxyless import SuperProxylessNASNets
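# Note: names used below without an explicit import here (torch, np, os, ArchSearchRunManager) are expected to be provided by the star import from search.nas_manager above.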
# ref values
ref_values = {
'flops': {
'0.35': 59 * 1e6,
'0.50': 97 * 1e6,
'0.75': 209 * 1e6,
'1.00': 300 * 1e6,
'1.30': 509 * 1e6,
'1.40': 582 * 1e6,
},
# ms
'mobile': {
'1.00': 80,
},
'cpu': {},
'gpu8': {},
}
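# Reference FLOPs / latency per width multiplier. When --target_hardware is set, args.ref_value is looked up
# from this table and handed to the search config (used to normalize the hardware-aware regularization term).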
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default=None, help="path of the architectural config file")
parser.add_argument('--gpu', help='gpu available', default='0,1,2,3')
parser.add_argument('--resume', action='store_true')
parser.add_argument('--debug', help='freeze the weight parameters', action='store_true')
parser.add_argument('--manual_seed', default=0, type=int)
""" run config """
parser.add_argument('--n_epochs', type=int, default=250)
parser.add_argument('--init_lr', type=float, default=0.01)
# adjust the learning rate as training progresses
parser.add_argument('--lr_schedule_type', type=str, default='cosine')
parser.add_argument('--dataset', type=str, default='imagenet', choices=['imagenet', 'mnist', 'tumor_simul'])
parser.add_argument('--train_batch_size', type=int, default=1000)
parser.add_argument('--test_batch_size', type=int, default=1000)
parser.add_argument('--valid_size', type=float, default=0.1, help="ratio of the valid dataset size from total")
parser.add_argument('--opt_type', type=str, default='sgd', choices=['sgd'])
parser.add_argument('--momentum', type=float, default=0.9) # opt_param
parser.add_argument('--no_nesterov', action='store_true') # opt_param
parser.add_argument('--weight_decay', type=float, default=4e-5)
parser.add_argument('--label_smoothing', type=float, default=0)
parser.add_argument('--no_decay_keys', type=str, default=None, choices=[None, 'bn', 'bn#bias'])
parser.add_argument('--model_init', type=str, default='he_fout', choices=['he_fin', 'he_fout'])
parser.add_argument('--init_div_groups', action='store_true')
parser.add_argument('--validation_frequency', type=int, default=1)
parser.add_argument('--print_frequency', type=int, default=10)
parser.add_argument('--n_worker', type=int, default=32)
parser.add_argument('--resize_scale', type=float, default=0.08)
parser.add_argument('--distort_color', type=str, default='normal', choices=['normal', 'strong', 'None'])
"""
net config
From the paper:
Additionally, we use two hyperparameters to control the depth and width of a network in this architecture space, i.e.
B and F, which respectively represents the number of blocks at each stage (totally 3 stages) and the number of output
channels of the final block.
"""
parser.add_argument('--width_stages', default='24,40,80,96,192,320', help="input channels through blocks")
parser.add_argument('--n_cell_stages', default='4,4,4,4,4,1', help="number of cells in each stage")
parser.add_argument('--stride_stages', default='2,2,2,1,2,1', help="conv strides for each stage")
parser.add_argument('--width_mult', type=float, default=1.0, help="width multiplier for increasing the width")
parser.add_argument('--output_size', type=int, default=128)
parser.add_argument('--dims', type=int, default=3)
parser.add_argument('--bn_momentum', type=float, default=0.1)
parser.add_argument('--bn_eps', type=float, default=1e-3)
parser.add_argument('--dropout', type=float, default=0)
# architecture search config
""" arch search algo and warmup """
parser.add_argument('--arch_algo', type=str, default='grad', choices=['grad', 'rl'])
parser.add_argument('--warmup_epochs', type=int, default=10)
""" shared hyper-parameters """
parser.add_argument('--arch_init_type', type=str, default='uniform', choices=['normal', 'uniform'])
parser.add_argument('--arch_init_ratio', type=float, default=1e-3)
parser.add_argument('--arch_opt_type', type=str, default='adam', choices=['adam'])
parser.add_argument('--arch_lr', type=float, default=1e-3) #1e-3
parser.add_argument('--arch_adam_beta1', type=float, default=0) # arch_opt_param # 0
parser.add_argument('--arch_adam_beta2', type=float, default=0.999) # arch_opt_param
parser.add_argument('--arch_adam_eps', type=float, default=1e-8) # arch_opt_param
parser.add_argument('--arch_weight_decay', type=float, default=0)
parser.add_argument('--target_hardware', type=str, default=None, choices=['mobile', 'cpu', 'gpu8', 'flops', None])
""" Grad hyper-parameters """
parser.add_argument('--grad_update_arch_param_every', type=int, default=5)
parser.add_argument('--grad_update_steps', type=int, default=1)
parser.add_argument('--grad_binary_mode', type=str, default='full_v2', choices=['full_v2', 'full', 'two'])
parser.add_argument('--grad_data_batch', type=int, default=None)
parser.add_argument('--grad_reg_loss_type', type=str, default='mul#log', choices=['add#linear', 'mul#log'])
parser.add_argument('--grad_reg_loss_lambda', type=float, default=1e-1) # grad_reg_loss_params
parser.add_argument('--grad_reg_loss_alpha', type=float, default=0.2) # grad_reg_loss_params
parser.add_argument('--grad_reg_loss_beta', type=float, default=0.3) # grad_reg_loss_params
""" RL hyper-parameters """
parser.add_argument('--rl_batch_size', type=int, default=10)
parser.add_argument('--rl_update_per_epoch', action='store_true')
parser.add_argument('--rl_update_steps_per_epoch', type=int, default=300)
parser.add_argument('--rl_baseline_decay_weight', type=float, default=0.99)
parser.add_argument('--rl_tradeoff_ratio', type=float, default=0.1)
if __name__ == '__main__':
args = parser.parse_args()
torch.manual_seed(args.manual_seed)
torch.cuda.manual_seed_all(args.manual_seed)
np.random.seed(args.manual_seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
os.makedirs(args.path, exist_ok=True)
# build run config from args
args.lr_schedule_param = None
args.opt_param = {
'momentum': args.momentum,
'nesterov': not args.no_nesterov,
}
if args.dataset == 'imagenet':
run_config = ImagenetRunConfig(**args.__dict__)
elif args.dataset == 'mnist':
run_config = MNISTRunConfig(**args.__dict__)
elif args.dataset == 'tumor_simul':
run_config = TumorRunConfig(**args.__dict__)
# debug, adjust run_config
if args.debug:
run_config.train_batch_size = 256
run_config.test_batch_size = 256
run_config.valid_size = 256
run_config.n_worker = 0
width_stages_str = '-'.join(args.width_stages.split(','))
# build net from args
args.width_stages = [int(val) for val in args.width_stages.split(',')]
args.n_cell_stages = [int(val) for val in args.n_cell_stages.split(',')]
args.stride_stages = [int(val) for val in args.stride_stages.split(',')]
args.conv_candidates = {
'conv_group': [
'1x1_Conv', '3x3_Conv', '5x5_Conv', 'Zero'
],
'trans_conv_group': [
'1x1_Conv', '3x3_Conv', '5x5_Conv', '1x1_TransConv', '3x3_TransConv', '5x5_TransConv', '7x7_TransConv',
'Zero'
]
}
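# Candidate operations per block: these two groups define the choices the architecture search picks from when
# building the over-parameterized super net below; the trans_conv_group additionally offers transposed convolutions.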
# create the complete architecture for NAS, based on the MobileNetV2 architecture
super_net = SuperProxylessNASNets(
width_stages=args.width_stages, n_cell_stages=args.n_cell_stages, stride_stages=args.stride_stages,
conv_candidates=args.conv_candidates, bn_param=(args.bn_momentum, args.bn_eps), output_size=args.output_size,
dims=args.dims
)
# build arch search config from args
if args.arch_opt_type == 'adam':
args.arch_opt_param = {
'betas': (args.arch_adam_beta1, args.arch_adam_beta2),
'eps': args.arch_adam_eps,
}
else:
args.arch_opt_param = None
if args.target_hardware is None:
args.ref_value = None
else:
args.ref_value = ref_values[args.target_hardware]['%.2f' % args.width_mult]
if args.arch_algo == 'grad':
from search.nas_manager import GradientArchSearchConfig
if args.grad_reg_loss_type == 'add#linear':
args.grad_reg_loss_params = {'lambda': args.grad_reg_loss_lambda}
elif args.grad_reg_loss_type == 'mul#log':
args.grad_reg_loss_params = {
'alpha': args.grad_reg_loss_alpha,
'beta': args.grad_reg_loss_beta,
}
else:
args.grad_reg_loss_params = None
arch_search_config = GradientArchSearchConfig(**args.__dict__)
elif args.arch_algo == 'rl':
from search.nas_manager import RLArchSearchConfig
arch_search_config = RLArchSearchConfig(**args.__dict__)
else:
raise NotImplementedError
print('Run config:')
for k, v in run_config.config.items():
print('\t%s: %s' % (k, v))
print('Architecture Search config:')
for k, v in arch_search_config.config.items():
print('\t%s: %s' % (k, v))
# arch search run manager
# inputs: config path, the built architecture, input dataset config and the gradient architecture configs
arch_search_run_manager = ArchSearchRunManager(args.path, super_net, run_config, arch_search_config)
# resume
if args.resume:
try:
arch_search_run_manager.load_model()
except Exception:
from pathlib import Path
home = str(Path.home())
warmup_path = os.path.join(
home, 'Workspace/Exp/arch_search/%s_ProxylessNAS_%.2f_%s/warmup.pth.tar' %
(run_config.dataset, args.width_mult, width_stages_str)
)
if os.path.exists(warmup_path):
print('load warmup weights')
arch_search_run_manager.load_model(model_fname=warmup_path)
else:
print('fail to load models')
# warmup
if arch_search_run_manager.warmup:
arch_search_run_manager.warm_up(warmup_epochs=args.warmup_epochs)
# joint training
arch_search_run_manager.train(fix_net_weights=args.debug)
|
py | b409cb1930398411de517525e7be680ba61f666e | from b_rabbit import BRabbit
rabbit = BRabbit(
host='localhost',
port=5672
)
publisher = BRabbit.EventPublisher(b_rabbit=rabbit, publisher_name='publisher', external=False)
publisher.publish(routing_key='publisher.pub',
payload='Hello from publisher',
important=False)
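# The payload goes out on routing key 'publisher.pub'; a subscriber bound to the same key on this broker would receive it.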
print('End')
|
py | b409cb1ce17de16883bdd75f68ce098ee92a4c8e | """Tests for slackmentions."""
import unittest
from slackperson import SlackPerson
from slackperson import SlackDataError
import slackmentions
USERLIST = {"members": [
{
"color": "ffffff",
"id": "U00000001",
"name": "jbiden",
"profile": {
"email": "[email protected]",
"first_name": "Joe",
"last_name": "Biden",
"real_name": "Joe Biden",
"real_name_normalized": "Joe Biden",
"team": "T00000001",
"title": ""
},
"real_name": "Joe Biden",
"team_id": "T00000001",
"tz": "America/New_York",
"tz_label": "Eastern Daylight Time",
"tz_offset": -14400,
},
{
"color": "000000",
"id": "U00000002",
"name": "bobama",
"profile": {
"email": "[email protected]",
"first_name": "Barack",
"last_name": "Obama",
"real_name": "Barack Obama",
"real_name_normalized": "Barack Obama",
"team": "T00000001"
},
"real_name": "Barack Obama",
"team_id": "T00000001",
"tz": "America/New_York",
"tz_label": "Eastern Daylight Time",
"tz_offset": -14400,
},
],
}
TESTTEXT1 = 'hey @jbiden, give me a call'
TESTTEXT2 = 'tell @jbiden and @bobama that I have a cool idea'
TESTTEXT3 = "tell @dtrump that he's not in our team."
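# '@dtrump' is deliberately absent from USERLIST, so lookups against TESTTEXT3 should raise SlackDataError unless silenced.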
class TestSlackMentions(unittest.TestCase):
"""Tests slackmentions."""
def setUp(self):
"""Set up some SlackPerson objects to use in tests."""
self.biden = SlackPerson('jbiden', USERLIST)
self.obama = SlackPerson('bobama', USERLIST)
def test_findperson(self):
"""Tests the findperson method."""
# test find one person
test_people = slackmentions.findpeople(TESTTEXT1, USERLIST)
assert len(test_people) == 1
assert test_people[0].userid == 'U00000001'
# test find two people
test_people2 = slackmentions.findpeople(TESTTEXT2, USERLIST)
assert len(test_people2) == 2
assert test_people2[1].userid == 'U00000002'
# test error raised
self.assertRaises(
SlackDataError,
slackmentions.findpeople,
TESTTEXT3,
USERLIST
)
# test error swallowed
self.assertListEqual(
slackmentions.findpeople(TESTTEXT3, USERLIST, silent=True),
[]
)
def test_mention_text(self):
"""Tests mention_text."""
# test with people
self.assertEqual(
slackmentions.mention_text(TESTTEXT2,
people=[self.obama, self.biden]),
'tell <@U00000001> and <@U00000002> that I have a cool idea')
# test with userlist
self.assertEqual(
slackmentions.mention_text(TESTTEXT2,
userlist=USERLIST),
'tell <@U00000001> and <@U00000002> that I have a cool idea')
# raises an error
self.assertRaises(
SlackDataError,
slackmentions.mention_text,
TESTTEXT3,
userlist=USERLIST
)
# swallows the error
self.assertEqual(
slackmentions.mention_text(
TESTTEXT3,
userlist=USERLIST,
silent=True),
TESTTEXT3
)
# Illegal Arguments
self.assertRaises(
ValueError,
slackmentions.mention_text,
TESTTEXT1
)
def test_clean_text(self):
"""Test cleaning the text of mentions."""
# working correctly
self.assertEqual(
slackmentions.clean_text(TESTTEXT1, userlist=USERLIST),
'hey, give me a call')
self.assertEqual(
slackmentions.clean_text(TESTTEXT1, people=[self.obama,
self.biden]),
'hey, give me a call')
self.assertEqual(
slackmentions.clean_text(TESTTEXT2, people=[self.obama,
self.biden]),
'tell and that I have a cool idea')
self.assertEqual(
slackmentions.clean_text('@bobama hi', people=[self.obama]),
'hi')
# raise a SlackDataError
self.assertRaises(
SlackDataError,
slackmentions.clean_text,
TESTTEXT3,
userlist=USERLIST
)
# swallow the error
self.assertEqual(
slackmentions.clean_text(TESTTEXT3, userlist=USERLIST,
silent=True),
"tell @dtrump that he's not in our team."
)
# nuke everything
self.assertEqual(
slackmentions.clean_text(TESTTEXT3, clean_all=True),
"tell that he's not in our team."
)
# Illegal argument combination
self.assertRaises(
ValueError,
slackmentions.clean_text,
TESTTEXT1
)
|
py | b409cc79f08de0260085ed3227f5f1e98a837a44 | import os
import os.path
from typing import Any, Callable, Dict, List, Optional, Union, Tuple
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
CATEGORIES_2021 = ["kingdom", "phylum", "class", "order", "family", "genus"]
DATASET_URLS = {
"2017": "https://ml-inat-competition-datasets.s3.amazonaws.com/2017/train_val_images.tar.gz",
"2018": "https://ml-inat-competition-datasets.s3.amazonaws.com/2018/train_val2018.tar.gz",
"2019": "https://ml-inat-competition-datasets.s3.amazonaws.com/2019/train_val2019.tar.gz",
"2021_train": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/train.tar.gz",
"2021_train_mini": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/train_mini.tar.gz",
"2021_valid": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/val.tar.gz",
}
DATASET_MD5 = {
"2017": "7c784ea5e424efaec655bd392f87301f",
"2018": "b1c6952ce38f31868cc50ea72d066cc3",
"2019": "c60a6e2962c9b8ccbd458d12c8582644",
"2021_train": "38a7bb733f7a09214d44293460ec0021",
"2021_train_mini": "db6ed8330e634445efc8fec83ae81442",
"2021_valid": "f6f6e0e242e3d4c9569ba56400938afc",
}
class INaturalist(VisionDataset):
"""`iNaturalist <https://github.com/visipedia/inat_comp>`_ Dataset.
Args:
root (string): Root directory of dataset where the image files are stored.
This class does not require/use annotation files.
version (string, optional): Which version of the dataset to download/use. One of
'2017', '2018', '2019', '2021_train', '2021_train_mini', '2021_valid'.
Default: `2021_train`.
target_type (string or list, optional): Type of target to use, for 2021 versions, one of:
- ``full``: the full category (species)
- ``kingdom``: e.g. "Animalia"
- ``phylum``: e.g. "Arthropoda"
- ``class``: e.g. "Insecta"
- ``order``: e.g. "Coleoptera"
- ``family``: e.g. "Cleridae"
- ``genus``: e.g. "Trichodes"
for 2017-2019 versions, one of:
- ``full``: the full (numeric) category
- ``super``: the super category, e.g. "Amphibians"
Can also be a list to output a tuple with all specified target types.
Defaults to ``full``.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
def __init__(
self,
root: str,
version: str = "2021_train",
target_type: Union[List[str], str] = "full",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
self.version = verify_str_arg(version, "version", DATASET_URLS.keys())
super().__init__(os.path.join(root, version), transform=transform, target_transform=target_transform)
os.makedirs(root, exist_ok=True)
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self.all_categories: List[str] = []
# map: category type -> name of category -> index
self.categories_index: Dict[str, Dict[str, int]] = {}
# list indexed by category id, containing mapping from category type -> index
self.categories_map: List[Dict[str, int]] = []
if not isinstance(target_type, list):
target_type = [target_type]
if self.version[:4] == "2021":
self.target_type = [verify_str_arg(t, "target_type", ("full", *CATEGORIES_2021)) for t in target_type]
self._init_2021()
else:
self.target_type = [verify_str_arg(t, "target_type", ("full", "super")) for t in target_type]
self._init_pre2021()
# index of all files: (full category id, filename)
self.index: List[Tuple[int, str]] = []
for dir_index, dir_name in enumerate(self.all_categories):
files = os.listdir(os.path.join(self.root, dir_name))
for fname in files:
self.index.append((dir_index, fname))
def _init_2021(self) -> None:
"""Initialize based on 2021 layout"""
self.all_categories = sorted(os.listdir(self.root))
# map: category type -> name of category -> index
self.categories_index = {k: {} for k in CATEGORIES_2021}
for dir_index, dir_name in enumerate(self.all_categories):
pieces = dir_name.split("_")
if len(pieces) != 8:
raise RuntimeError(f"Unexpected category name {dir_name}, wrong number of pieces")
if pieces[0] != f"{dir_index:05d}":
raise RuntimeError(f"Unexpected category id {pieces[0]}, expecting {dir_index:05d}")
cat_map = {}
for cat, name in zip(CATEGORIES_2021, pieces[1:7]):
if name in self.categories_index[cat]:
cat_id = self.categories_index[cat][name]
else:
cat_id = len(self.categories_index[cat])
self.categories_index[cat][name] = cat_id
cat_map[cat] = cat_id
self.categories_map.append(cat_map)
def _init_pre2021(self) -> None:
"""Initialize based on 2017-2019 layout"""
# map: category type -> name of category -> index
self.categories_index = {"super": {}}
cat_index = 0
super_categories = sorted(os.listdir(self.root))
for sindex, scat in enumerate(super_categories):
self.categories_index["super"][scat] = sindex
subcategories = sorted(os.listdir(os.path.join(self.root, scat)))
for subcat in subcategories:
if self.version == "2017":
# this version does not use ids as directory names
subcat_i = cat_index
cat_index += 1
else:
try:
subcat_i = int(subcat)
except ValueError:
raise RuntimeError(f"Unexpected non-numeric dir name: {subcat}")
if subcat_i >= len(self.categories_map):
old_len = len(self.categories_map)
self.categories_map.extend([{}] * (subcat_i - old_len + 1))
self.all_categories.extend([""] * (subcat_i - old_len + 1))
if self.categories_map[subcat_i]:
raise RuntimeError(f"Duplicate category {subcat}")
self.categories_map[subcat_i] = {"super": sindex}
self.all_categories[subcat_i] = os.path.join(scat, subcat)
# validate the dictionary
for cindex, c in enumerate(self.categories_map):
if not c:
raise RuntimeError(f"Missing category {cindex}")
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where the type of target specified by target_type.
"""
cat_id, fname = self.index[index]
img = Image.open(os.path.join(self.root, self.all_categories[cat_id], fname))
target: Any = []
for t in self.target_type:
if t == "full":
target.append(cat_id)
else:
target.append(self.categories_map[cat_id][t])
target = tuple(target) if len(target) > 1 else target[0]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.index)
def category_name(self, category_type: str, category_id: int) -> str:
"""
Args:
category_type(str): one of "full", "kingdom", "phylum", "class", "order", "family", "genus" or "super"
category_id(int): an index (class id) from this category
Returns:
the name of the category
"""
if category_type == "full":
return self.all_categories[category_id]
else:
if category_type not in self.categories_index:
raise ValueError(f"Invalid category type '{category_type}'")
else:
for name, id in self.categories_index[category_type].items():
if id == category_id:
return name
raise ValueError(f"Invalid category id {category_id} for {category_type}")
def _check_integrity(self) -> bool:
return os.path.exists(self.root) and len(os.listdir(self.root)) > 0
def download(self) -> None:
if self._check_integrity():
raise RuntimeError(
f"The directory {self.root} already exists. "
f"If you want to re-download or re-extract the images, delete the directory."
)
base_root = os.path.dirname(self.root)
download_and_extract_archive(
DATASET_URLS[self.version], base_root, filename=f"{self.version}.tgz", md5=DATASET_MD5[self.version]
)
# str.rstrip strips a trailing character set, not a suffix, so drop the ".tar.gz" suffix explicitly
orig_dir_name = os.path.join(base_root, os.path.basename(DATASET_URLS[self.version])[: -len(".tar.gz")])
if not os.path.exists(orig_dir_name):
raise RuntimeError(f"Unable to find downloaded files at {orig_dir_name}")
os.rename(orig_dir_name, self.root)
print(f"Dataset version '{self.version}' has been downloaded and prepared for use")
|
py | b409cd5c68f4f06be081a4beeeb13cd46b58d65a | from typing import Dict
import traceback  # used in the error handler at the bottom of this file
from typing import List  # List is used in type annotations below
import urllib3
from CommonServerPython import *
# Disable insecure warnings
urllib3.disable_warnings()
VENDOR_NAME = 'Anomali Enterprise'
''' CLIENT CLASS '''
class Client(BaseClient):
"""
Client to use in the Anomali Enterprise integration. Overrides BaseClient
"""
def __init__(self, server_url: str, username: str, password: str, verify: bool, proxy: bool):
headers = {
'Content-Type': 'application/json',
'ae-authorization': f'{username}:{password}'
}
super().__init__(base_url=server_url, verify=verify, proxy=proxy, headers=headers)
self._username = username
self._password = password
def start_search_job_request(self, from_: str, to_: str, indicators: List[str]) -> dict:
"""Initiate a search job.
Args:
from_: from which time to initiate the search
to_: to which time to initiate the search
indicators: indicators to search
Returns:
Response from API.
"""
data = {'username': self._username, 'password': self._password, 'from': from_, 'to': to_,
'indicators': indicators}
return self._http_request(method='POST', url_suffix='/api/v1/mars/forensic', headers=self._headers,
json_data=data)
def get_search_job_result_request(self, job_id: str) -> dict:
"""Retrieve a search job results.
Args:
job_id: the search job uuid
Returns:
Response from API.
"""
params = {'jobid': job_id}
return self._http_request(method='GET', url_suffix='/api/v1/mars/forensic', headers=self._headers,
params=params)
def domain_request(self, domain: List[str]) -> dict:
"""Retrieve information regarding a domain.
Args:
domain: the domain name to search
Returns:
Response from API.
"""
data = {'username': self._username, 'password': self._password, 'domains': domain}
return self._http_request(method='POST', url_suffix='/api/v1/mars/dga_score', headers=self._headers,
json_data=data)
''' COMMAND FUNCTIONS '''
def module(client: Client) -> str:
"""
Performs a basic request to verify connectivity and credentials
"""
response = client.domain_request(argToList('google.com'))
if response.get('result') != 'success':
raise Exception('To use Anomali Enterprise, make sure you are using the correct username and password '
'and have the needed permissions.')
return 'ok'
def start_search_job(client: Client, args: dict) -> CommandResults:
"""Start a search job for IOCs.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
CommandResults.
"""
from_ = str(args.get('from', '1 day'))
to_ = str(args.get('to', ''))
indicators = argToList(args.get('indicators'))
timestamp_format = '%Y-%m-%dT%H:%M:%S.%f'
from_iso = parse_date_range(from_, date_format=timestamp_format)[0]
if to_:
to_iso = parse_date_range(to_, date_format=timestamp_format)[0]
else:
to_iso = datetime.now().strftime(timestamp_format)
response = client.start_search_job_request(from_iso, to_iso, indicators)
start_search_outputs = {
'status': 'in progress',
'job_id': response.get('jobid', '')
}
return CommandResults(
outputs_prefix='AnomaliEnterprise.ForensicSearch',
outputs_key_field='job_id',
outputs=start_search_outputs,
readable_output=tableToMarkdown(name="Forensic search started:", t=start_search_outputs, removeNull=True),
raw_response=response
)
def get_search_job_result(client: Client, args: Dict) -> CommandResults:
"""Get the search job result.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
CommandResults.
"""
job_id = str(args.get('job_id'))
limit = int(args.get('limit', '20'))
verbose = args.get('verbose', 'true') == 'true'
response = client.get_search_job_result_request(job_id)
if 'error' in response:
raise Exception(f"{str(response.get('error'))}. Job ID might have expired.")
outputs = response
outputs.update({'job_id': job_id})
if not response.get('complete'):
human_readable = f'job ID: {job_id} is still in progress.'
outputs.update({'status': 'in progress'})
else:
if response.get('totalMatches'):
headers = ['status', 'job_id', 'category', 'totalFiles', 'scannedEvents']
human_readable = tableToMarkdown(name="Forensic search metadata:", t=response, headers=headers,
removeNull=True)
if verbose:
human_readable += tableToMarkdown(name="Forensic search results:",
t=response.get('streamResults', [])[:limit], removeNull=True)
if 'streamResults' in outputs:
outputs['streamResults'] = outputs.get('streamResults', [])[:limit] # limit the outputs to the context
else:
human_readable = f'No matches found for the given job ID: {job_id}.'
response.update({'status': 'completed'})
return CommandResults(
outputs_prefix='AnomaliEnterprise.ForensicSearch',
outputs_key_field='job_id',
outputs=response,
readable_output=human_readable,
raw_response=response
)
def dga_domain_status(client: Client, args: dict) -> CommandResults:
"""Search domain DGA status.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
CommandResults.
"""
domains = argToList(str(args.get('domains')))
response = client.domain_request(domains)
domains_data = response.get('data', {})
outputs = []
for domain in domains:
output = {
'domain': domain,
'malware_family': domains_data.get(domain, {}).get('malware_family'),
'probability': domains_data.get(domain, {}).get('probability')
}
outputs.append(output)
return CommandResults(
outputs_prefix='AnomaliEnterprise.DGA',
outputs_key_field='domain',
outputs=outputs,
readable_output=tableToMarkdown(name="Domains DGA:", t=outputs, removeNull=True),
raw_response=response
)
def domain_command(client: Client, args: dict) -> List[CommandResults]:
"""Search domain DGA status.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
CommandResults and DBotScore.
"""
domain_list = argToList(args.get('domain'))
response = client.domain_request(domain_list)
domains_data = response.get('data', {})
command_results_list = []
for domain in domain_list:
output = {
'domain': domain,
'malware_family': domains_data.get(domain, {}).get('malware_family'),
'probability': domains_data.get(domain, {}).get('probability')
}
score = calculate_dbot_score(domains_data.get(domain, {}))
dbot_score = Common.DBotScore(
indicator=domain,
indicator_type=DBotScoreType.DOMAIN,
integration_name=VENDOR_NAME,
score=score,
malicious_description=str(output.get('malware_family', ''))
)
domain = Common.Domain(
domain=domain,
dbot_score=dbot_score,
)
command_results = CommandResults(
outputs_prefix='AnomaliEnterprise.DGA',
outputs_key_field='domain',
outputs=output,
readable_output=tableToMarkdown(name="Domains DGA:", t=output, removeNull=True),
indicator=domain,
raw_response=response
)
command_results_list.append(command_results)
return command_results_list
def calculate_dbot_score(domain_data: dict) -> int:
"""There is no distinction between benign to unknown domains in Anomali Enterprise
malware family exists and prob > 0.6 -> 3
malware family exists and prob < 0.6 -> 2
else -> 0
Args:
domain_data: the domain data
Returns:
DBot Score.
"""
score = Common.DBotScore.NONE
if domain_data.get('malware_family', {}):
if float(domain_data.get('probability', 0)) > 0.6:
score = Common.DBotScore.BAD
else:
score = Common.DBotScore.SUSPICIOUS
return score
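# Illustrative mapping: {'malware_family': 'x', 'probability': 0.9} -> 3 (BAD); probability 0.4 -> 2 (SUSPICIOUS);
# an empty dict (no malware family) -> 0 (NONE).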
''' MAIN FUNCTION '''
def main() -> None:
"""
Parse and validates integration params, runs integration commands.
"""
params = demisto.params()
server_url = params.get('url')
username = params.get('credentials', {}).get('identifier')
password = params.get('credentials', {}).get('password')
verify = not params.get('insecure', False)
proxy = params.get('proxy') is True
command = demisto.command()
LOG(f'Command being called in {VENDOR_NAME} is: {command}')
try:
client = Client(server_url=server_url, username=username, password=password, verify=verify, proxy=proxy)
commands = {
'anomali-enterprise-retro-forensic-search': start_search_job,
'anomali-enterprise-retro-forensic-search-results': get_search_job_result,
'anomali-enterprise-dga-domain-status': dga_domain_status,
'domain': domain_command,
}
if command == 'test-module':
return_results(module(client))
elif command in commands:
return_results(commands[command](client, demisto.args()))
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
except Exception as err:
return_error(f'Failed to execute {command} command. Error: {str(err)} \n '
f'traceback: {traceback.format_exc()}')
''' ENTRY POINT '''
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
|
py | b409ce0410dbd847d76a0e2913c183b8ee9d8dd8 | def multiply(lista):
result = 1  # start from the multiplicative identity; starting from lista[0] counted the first element twice
for el in lista:
result *= el
return result
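# e.g. multiply([2, 3, 4]) -> 24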
if __name__ == '__main__':
lista = [1,2,3,4]
print(multiply(lista))
|
py | b409ce22e99015da6ea92f084467e87e08d9e015 | #!/usr/bin/env python
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import numpy
import os
import sys
import platform
import traceback
win_cuda_dir = None
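# find_cuda(): locate the CUDA toolkit via $CUDAHOME or by searching $PATH for nvcc,
# returning a dict with the toolkit's home, nvcc, include and lib paths.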
def find_cuda():
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = os.path.join(home, 'bin', 'nvcc')
else:
nvcc = None
for dir in os.environ['PATH'].split(os.pathsep):
binpath = os.path.join(dir, 'nvcc')
if os.path.exists(binpath):
nvcc = os.path.abspath(binpath)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be located in '
'your $PATH. Either add it to your path, or'
'set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home': home, 'nvcc': nvcc,
'include': os.path.join(home, 'include')}
for k, v in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in '
'%s' % (k, v))
libdir = os.path.join(home, 'lib')
arch = int(platform.architecture()[0][0:2])
if sys.platform.startswith('win'):
os.path.join(libdir, "x"+str(arch))
if os.path.exists(os.path.join(home, libdir + "64")):
cudaconfig['lib'] = libdir + "64"
elif os.path.exists(os.path.join(home, libdir)):
cudaconfig['lib'] = libdir
else:
raise EnvironmentError('The CUDA libraries could not be located')
return cudaconfig
try:
CUDA = find_cuda()
except EnvironmentError:
CUDA = None
print("Proceeding without CUDA")
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
def customize_compiler_for_nvcc(self):
'''This is a verbatim copy of the NVCC compiler extension from
https://github.com/rmcgibbo/npcuda-example
'''
self.src_extensions.append('.cu')
default_compiler_so = self.compiler_so
super = self._compile
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
self.set_executable('compiler_so', CUDA['nvcc'])
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['cc']
super(obj, src, ext, cc_args, postargs, pp_opts)
self.compiler_so = default_compiler_so
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
cmdclass = {}
if sys.platform.startswith('win') and win_cuda_dir is not None:
if win_cuda_dir == "":
if 'CUDA_PATH' in os.environ:
win_cuda_dir = os.environ['CUDA_PATH']
elif os.path.exists(win_cuda_dir):
pass
else:
win_cuda_dir = None
if win_cuda_dir:
arch = int(platform.architecture()[0][0:2])
somoclu_module = Extension('_somoclu_wrap',
sources=['somoclu/somoclu_wrap.cxx'],
extra_objects=[
'somoclu/src/denseCpuKernels.obj',
'somoclu/src/sparseCpuKernels.obj',
'somoclu/src/training.obj',
'somoclu/src/mapDistanceFunctions.obj',
'somoclu/src/uMatrix.obj',
'somoclu/src/denseGpuKernels.cu.obj'],
define_macros=[('CUDA', None)],
library_dirs=[win_cuda_dir+"/lib/x"+str(arch)],
libraries=['cudart', 'cublas'],
include_dirs=[numpy_include])
else:
sources_files = ['somoclu/src/denseCpuKernels.cpp',
'somoclu/src/sparseCpuKernels.cpp',
'somoclu/src/mapDistanceFunctions.cpp',
'somoclu/src/training.cpp',
'somoclu/src/uMatrix.cpp',
'somoclu/somoclu_wrap.cxx']
if sys.platform.startswith('win'):
extra_compile_args = ['-openmp']
cmdclass = {}
somoclu_module = Extension('_somoclu_wrap',
sources=sources_files,
include_dirs=[numpy_include, 'src'],
extra_compile_args=extra_compile_args,
)
else:
extra_compile_args = ['-fopenmp']
if 'CC' in os.environ and 'clang-omp' in os.environ['CC']:
openmp = 'iomp5'
else:
openmp = 'gomp'
cmdclass = {'build_ext': custom_build_ext}
somoclu_module = Extension('_somoclu_wrap',
sources=sources_files,
include_dirs=[numpy_include, 'src'],
extra_compile_args={'cc': extra_compile_args},
libraries=[openmp],
)
if CUDA is not None:
somoclu_module.sources.append('somoclu/src/denseGpuKernels.cu')
somoclu_module.define_macros = [('CUDA', None)]
somoclu_module.include_dirs.append(CUDA['include'])
somoclu_module.library_dirs = [CUDA['lib']]
somoclu_module.libraries += ['cudart', 'cublas']
somoclu_module.runtime_library_dirs = [CUDA['lib']]
somoclu_module.extra_compile_args['nvcc']=['-use_fast_math',
'--ptxas-options=-v', '-c',
'--compiler-options','-fPIC ' +
extra_compile_args[0]]
try:
setup(name='somoclu',
version='1.7.6',
license='GPL3',
author="Peter Wittek, Shi Chao Gao",
author_email="",
maintainer="shichaogao",
maintainer_email="[email protected]",
url="https://somoclu.readthedocs.io/",
platforms=["unix", "windows"],
description="Massively parallel implementation of self-organizing maps",
ext_modules=[somoclu_module],
packages=["somoclu"],
install_requires=['numpy', 'matplotlib', 'scipy'],
classifiers=[
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: C++'
],
cmdclass=cmdclass,
test_suite="tests"
)
except:
traceback.print_exc()
setup(name='somoclu',
version='1.7.6',
license='GPL3',
author="Peter Wittek, Shi Chao Gao",
author_email="",
maintainer="shichaogao",
maintainer_email="[email protected]",
url="https://somoclu.readthedocs.io/",
platforms=["unix", "windows"],
description="Massively parallel implementation of self-organizing maps",
packages=["somoclu"],
classifiers=[
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: C++'
],
install_requires=['numpy', 'matplotlib', 'scipy'],
test_suite="tests"
)
|
py | b409cef0ab667294cf640ff028dd4417cf060043 | import csv
import datetime
import os
import pathlib
import re
import sys
import codecs
import sqlite3
import string
import binascii
import math
from bs4 import BeautifulSoup
class OutputParameters:
'''Defines the output parameters (report folder paths and log file locations) that are common across a run'''
# static parameters
nl = '\n'
screen_output_file_path = ''
def __init__(self, output_folder):
now = datetime.datetime.now()
currenttime = str(now.strftime('%Y-%m-%d_%A_%H%M%S'))
self.report_folder_base = os.path.join(output_folder, 'iLEAPP_Reports_' + currenttime) # aleapp , aleappGUI, ileap_artifacts, report.py
self.temp_folder = os.path.join(self.report_folder_base, 'temp')
OutputParameters.screen_output_file_path = os.path.join(self.report_folder_base, 'Script Logs', 'Screen Output.html')
OutputParameters.screen_output_file_path_devinfo = os.path.join(self.report_folder_base, 'Script Logs', 'DeviceInfo.html')
os.makedirs(os.path.join(self.report_folder_base, 'Script Logs'))
os.makedirs(self.temp_folder)
def is_platform_windows():
'''Returns True if running on Windows'''
return os.name == 'nt'
def sanitize_file_path(filename, replacement_char='_'):
'''
Removes illegal characters (for windows) from the string passed. Does not replace \ or /
'''
return re.sub(r'[*?:"<>|\'\r\n]', replacement_char, filename)
def sanitize_file_name(filename, replacement_char='_'):
'''
Removes illegal characters (for windows) from the string passed.
'''
return re.sub(r'[\\/*?:"<>|\'\r\n]', replacement_char, filename)
def get_next_unused_name(path):
'''Checks if path exists, if it does, finds an unused name by appending -xx
where xx=00-99. Return value is new path.
If it is a file like abc.txt, then abc-01.txt will be the next
'''
folder, basename = os.path.split(path)
ext = None
if basename.find('.') > 0:
basename, ext = os.path.splitext(basename)
num = 1
new_name = basename
if ext != None:
new_name += f"{ext}"
while os.path.exists(os.path.join(folder, new_name)):
new_name = basename + "-{:02}".format(num)
if ext != None:
new_name += f"{ext}"
num += 1
return os.path.join(folder, new_name)
def does_column_exist_in_db(db, table_name, col_name):
'''Checks if a specific col exists'''
col_name = col_name.lower()
try:
db.row_factory = sqlite3.Row # For fetching columns by name
query = f"pragma table_info('{table_name}');"
cursor = db.cursor()
cursor.execute(query)
all_rows = cursor.fetchall()
for row in all_rows:
if row['name'].lower() == col_name:
return True
except sqlite3.Error as ex:
print(f"Query error, query={query} Error={str(ex)}")
pass
return False
class GuiWindow:
'''This only exists to hold window handle if script is run from GUI'''
window_handle = None # static variable
progress_bar_total = 0
progress_bar_handle = None
@staticmethod
def SetProgressBar(n):
if GuiWindow.progress_bar_handle:
GuiWindow.progress_bar_handle.UpdateBar(n)
def logfunc(message=""):
with open(OutputParameters.screen_output_file_path, 'a', encoding='utf8') as a:
print(message)
a.write(message + '<br>' + OutputParameters.nl)
if GuiWindow.window_handle:
GuiWindow.window_handle.refresh()
def logdevinfo(message=""):
with open(OutputParameters.screen_output_file_path_devinfo, 'a', encoding='utf8') as b:
b.write(message + '<br>' + OutputParameters.nl)
def tsv(report_folder, data_headers, data_list, tsvname):
report_folder = report_folder.rstrip('/')
report_folder = report_folder.rstrip('\\')
report_folder_base, tail = os.path.split(report_folder)
tsv_report_folder = os.path.join(report_folder_base, '_TSV Exports')
if os.path.isdir(tsv_report_folder):
pass
else:
os.makedirs(tsv_report_folder)
with codecs.open(os.path.join(tsv_report_folder, tsvname +'.tsv'), 'a', 'utf-8-sig') as tsvfile:
tsv_writer = csv.writer(tsvfile, delimiter='\t')
tsv_writer.writerow(data_headers)
for i in data_list:
tsv_writer.writerow(i)
def timeline(report_folder, tlactivity, data_list, data_headers):
report_folder = report_folder.rstrip('/')
report_folder = report_folder.rstrip('\\')
report_folder_base, tail = os.path.split(report_folder)
tl_report_folder = os.path.join(report_folder_base, '_Timeline')
if os.path.isdir(tl_report_folder):
tldb = os.path.join(tl_report_folder, 'tl.db')
db = sqlite3.connect(tldb)
cursor = db.cursor()
cursor.execute('''PRAGMA synchronous = EXTRA''')
cursor.execute('''PRAGMA journal_mode = WAL''')
else:
os.makedirs(tl_report_folder)
#create database
tldb = os.path.join(tl_report_folder, 'tl.db')
db = sqlite3.connect(tldb, isolation_level = 'exclusive')
cursor = db.cursor()
cursor.execute(
"""
CREATE TABLE data(key TEXT, activity TEXT, datalist TEXT)
"""
)
db.commit()
a = 0
length = (len(data_list))
while a < length:
modifiedList = list(map(lambda x, y: x.upper() + ': ' + str(y), data_headers, data_list[a]))
cursor.executemany("INSERT INTO data VALUES(?,?,?)", [(str(data_list[a][0]), tlactivity.upper(), str(modifiedList))])
a += 1
db.commit()
db.close()
''' Returns a string of printable characters, replacing each non-printable
character with '.' (chr(46)).
'''
def strings_raw(data):
return "".join([chr(byte) if byte >= 0x20 and byte < 0x7F else chr(46) for byte in data])
''' Returns the printable-character runs (length >= 4) found in the data,
similar to the Linux `strings` command.
'''
def strings(data):
cleansed = "".join([chr(byte) if byte >= 0x20 and byte < 0x7F else chr(0) for byte in data])
return filter(lambda string: len(string) >= 4, cleansed.split(chr(0)))
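# Small worked example (illustrative only) of the two helpers above:
#
#   data = b'\x00\x01GPS\xfflatitude\x02\x7f'
#   strings_raw(data)    -> '..GPS.latitude..'   (same length as the input)
#   list(strings(data))  -> ['latitude']         (only runs of 4+ printable chars)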
''' Returns an HTML table containing a hexdump of the data passed in.
'''
def generate_hexdump(data, char_per_row = 5):
data_hex = binascii.hexlify(data).decode('utf-8')
str_raw = strings_raw(data)
str_hex = ''
str_ascii = ''
''' Generates offset column
'''
offset_rows = math.ceil(len(data_hex)/(char_per_row * 2))
offsets = [i for i in range(0, len(data_hex), char_per_row)][:offset_rows]
str_offset = '<br>'.join([ str(hex(s)[2:]).zfill(4).upper() for s in offsets ])
''' Generates hex data column
'''
c = 0
for i in range(0, len(data_hex), 2):
str_hex += data_hex[i:i + 2] + ' '
if c == char_per_row - 1:
str_hex += '<br>'
c = 0
else:
c += 1
''' Generates ascii column of data
'''
for i in range(0, len(str_raw), char_per_row):
str_ascii += str_raw[i:i + char_per_row] + '<br>'
return f'''
<table id="GeoLocationHexTable" aria-describedby="GeoLocationHexTable" cellspacing="0">
<thead>
<tr>
<th style="border-right: 1px solid #000;border-bottom: 1px solid #000;">Offset</th>
<th style="width: 100px; border-right: 1px solid #000;border-bottom: 1px solid #000;">Hex</th>
<th style="border-bottom: 1px solid #000;">Ascii</th>
</tr>
</thead>
<tbody>
<tr>
<td style="white-space:nowrap; border-right: 1px solid #000;">{str_offset}</td>
<td style="border-right: 1px solid #000; white-space:nowrap;">{str_hex}</td>
<td style="white-space:nowrap;">{str_ascii}</td>
</tr></tbody></table>
'''
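# Usage sketch (illustrative, not from the original source): render a BLOB
# column as an HTML hex view inside a report. The variable names are
# hypothetical.
#
#   blob = b'\x01\x02ABC'
#   html_fragment = generate_hexdump(blob, char_per_row=8)
#   # html_fragment is a <table> string with offset, hex and ascii columns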
|
py | b409cf20e9a098af8267113891cdf24dbceb1bc6 | """
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project, who also developed the StataWriter; it was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
import datetime
import struct
import sys
import numpy as np
from dateutil.relativedelta import relativedelta
from pandas._libs.lib import max_len_string_array, infer_dtype
from pandas._libs.tslib import NaT, Timestamp
import pandas as pd
from pandas import compat, to_timedelta, to_datetime, isna, DatetimeIndex
from pandas.compat import (lrange, lmap, lzip, text_type, string_types, range,
zip, BytesIO)
from pandas.core.base import StringMixin
from pandas.core.arrays import Categorical
from pandas.core.dtypes.common import (is_categorical_dtype, _ensure_object,
is_datetime64_dtype)
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.io.common import (get_filepath_or_buffer, BaseIterator,
_stringify_path)
from pandas.util._decorators import Appender
from pandas.util._decorators import deprecate_kwarg
VALID_ENCODINGS = ('ascii', 'us-ascii', 'latin-1', 'latin_1', 'iso-8859-1',
'iso8859-1', '8859', 'cp819', 'latin', 'latin1', 'L1')
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to latin-1."""
_statafile_processing_params2 = """\
index_col : string, optional, default: None
Column to set as index
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
.. deprecated::
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
@deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index_col=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index_col=index_col, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize, encoding=encoding)
if iterator or chunksize:
data = reader
else:
try:
data = reader.read()
finally:
reader.close()
return data
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Returns
-------
converted : Series
The converted dates
Examples
--------
>>> import pandas as pd
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
half-years since 1960h1
yearly date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas.
Otherwise it falls back to a slower but more robust method
using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
else:
index = getattr(year, 'index', None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)],
index=index)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return (to_datetime(year, format='%Y') +
to_timedelta(days, unit='d'))
else:
index = getattr(year, 'index', None)
value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, 'index', None)
if unit == 'd':
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == 'ms':
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [base + relativedelta(microseconds=(int(d) * 1000))
for d in deltas]
return Series(values, index=index)
else:
raise ValueError('format not understood')
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, 'ms')
elif fmt.startswith(("%tC", "tC")):
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = pd.NaT
return conv_dates
# Delta days relative to base
elif fmt.startswith(("%td", "td", "%d", "d")):
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, 'd')
# does not count leap days - 7 days is a week.
# 52nd week may have more than 7 days
elif fmt.startswith(("%tw", "tw")):
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt.startswith(("%tm", "tm")): # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%th", "th")): # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%ty", "ty")): # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt %s not understood" % fmt)
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
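# Worked example (illustrative) of the monthly branch above, assuming the
# helper is called with fmt="%tm": a Stata value of 1 means one month after
# 1960m1.
#
#   year  = 1960 + 1 // 12   ->  1960
#   month = (1 % 12) + 1     ->  2
#
# so _stata_elapsed_date_to_datetime_vec(pd.Series([1]), "%tm") yields a
# Series containing Timestamp('1960-02-01').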
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates) == 'datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain either '
'datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isna(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + \
(d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError("Format %s is not a known Stata date format" % fmt)
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
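# Round-trip sketch (illustrative) for the weekly branch above, reversing the
# docstring example 52 -> 1961-01-01:
#
#   d.year = 1961, d.days = 0
#   conv   = 52 * (1961 - 1960) + 0 // 7  ->  52
#
# i.e. _datetime_to_stata_elapsed_vec(pd.Series([pd.Timestamp('1961-01-01')]),
# "%tw") returns 52.0 (SIF values are written as float64).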
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
the values are in the int32 range, and sidecast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
bool columns are cast to int8. uint columns are converted to int of the
same size if there is no loss in precision, otherwise are upcast to a
larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if (data[col].max() <= 2147483620 and
data[col].min() >= -2147483647):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
msg = 'Column {0} has a maximum value of infinity which is ' \
'outside the range supported by Stata.'
raise ValueError(msg.format(col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
msg = 'Column {0} has a maximum value ({1}) outside the ' \
'range supported by Stata ({1})'
raise ValueError(msg.format(col, value, float64_max))
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
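# Sketch (illustrative) of the casting rules above on a hypothetical frame:
#
#   df = pd.DataFrame({'flag': np.array([True, False]),
#                      'count': np.array([1, 200], dtype=np.uint8)})
#   df = _cast_to_stata_types(df)
#
# 'flag' (bool) is cast to int8; 'count' (uint8 with a maximum above the int8
# range) is upcast to int16.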
class StataValueLabel(object):
"""
Parse a categorical column and prepare formatted output
Parameters
-----------
catarray : Series
Categorical Series whose category codes and labels are written out
Attributes
----------
labname : str
Name of the value label, taken from the column name
value_labels : list of tuples
(category code, category label) pairs, sorted by code
Methods
-------
generate_value_label
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, string_types):
category = str(category)
import warnings
warnings.warn(value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError('Stata value labels for a single variable must '
'have a combined length less than 32,000 '
'characters.')
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
return s
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = '\x00'
null_byte = b'\x00'
# len
bio.write(struct.pack(byteorder + 'i', self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack('c', null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + 'i', self.n))
# textlen - int32
bio.write(struct.pack(byteorder + 'i', self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + 'i', offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + 'i', value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
class StataMissingValue(StringMixin):
"""
An observation's missing value.
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[compat.long(b)] = '.'
for i in range(1, 27):
MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
float32_base = b'\x00\x00\x00\x7f'
increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
for i in range(27):
value = struct.unpack('<f', float32_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('<i', struct.pack('<f', value))[
0] + increment
float32_base = struct.pack('<i', int_value)
float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
for i in range(27):
value = struct.unpack('<d', float64_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
float64_base = struct.pack('q', int_value)
BASE_MISSING_VALUES = {'int8': 101,
'int16': 32741,
'int32': 2147483621,
'float32': struct.unpack('<f', float32_base)[0],
'float64': struct.unpack('<d', float64_base)[0]}
def __init__(self, value):
self._value = value
# Conversion to long to avoid hash issues on 32 bit platforms #8968
value = compat.long(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
value = property(lambda self: self._value,
doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "%s(%s)" % (self.__class__, self)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.string == other.string and self.value == other.value)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES['int8']
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES['int16']
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES['int32']
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES['float32']
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES['float64']
else:
raise ValueError('Unsupported dtype')
return value
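# Illustrative example of the class above, using values from the table in the
# docstring:
#
#   StataMissingValue(101).string                        -> '.'   (int8 generic missing)
#   StataMissingValue(102).string                        -> '.a'
#   StataMissingValue.get_base_missing_value(np.int16)   -> 32741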
class StataParser(object):
_default_encoding = 'latin-1'
def __init__(self, encoding):
if encoding is not None:
if encoding not in VALID_ENCODINGS:
raise ValueError('Unknown encoding. Only latin-1 and ascii '
'supported.')
self._encoding = encoding
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = \
dict(
lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
[
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64)
]
)
self.DTYPE_MAP_XML = \
dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8)
]
)
self.TYPE_MAP = lrange(251) + list('bhlfd')
self.TYPE_MAP_XML = \
dict(
[
# Not really a Q, unclear how to handle byteswap
(32768, 'Q'),
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
(65529, 'h'),
(65530, 'b')
]
)
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b'\xff\xff\xff\xfe'
float32_max = b'\xff\xff\xff\x7e'
float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
self.VALID_RANGE = {
'b': (-127, 100),
'h': (-32767, 32740),
'l': (-2147483647, 2147483620),
'f': (np.float32(struct.unpack('<f', float32_min)[0]),
np.float32(struct.unpack('<f', float32_max)[0])),
'd': (np.float64(struct.unpack('<d', float64_min)[0]),
np.float64(struct.unpack('<d', float64_max)[0]))
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254 # float
# don't know old code for double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
'b': 101,
'h': 32741,
'l': 2147483621,
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(
struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
self.NUMPY_TYPE_MAP = {
'b': 'i1',
'h': 'i2',
'l': 'i4',
'f': 'f4',
'd': 'f8',
'Q': 'u8'
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
'byte', 'case', 'catch', 'class', 'colvector',
'complex', 'const', 'continue', 'default',
'delegate', 'delete', 'do', 'double', 'else',
'eltypedef', 'end', 'enum', 'explicit',
'export', 'external', 'float', 'for', 'friend',
'function', 'global', 'goto', 'if', 'inline',
'int', 'local', 'long', 'NULL', 'pragma',
'protected', 'quad', 'rowvector', 'short',
'typedef', 'typename', 'virtual')
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
@deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index_col=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='latin-1', chunksize=None):
super(StataReader, self).__init__(encoding)
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
if encoding is not None:
if encoding not in VALID_ENCODINGS:
raise ValueError('Unknown encoding. Only latin-1 and ascii '
'supported.')
self._encoding = encoding
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _ = get_filepath_or_buffer(
path_or_buf, encoding=self._default_encoding
)
if isinstance(path_or_buf, (str, compat.text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
try:
contents = contents.encode(self._default_encoding)
except:
pass
self.path_or_buf = BytesIO(contents)
self._read_header()
self._setup_dtype()
def __enter__(self):
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
self._read_new_header(first_char)
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist
if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self.data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
self._seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
self._seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
def f(typ):
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata types [{0}]".
format(typ))
typlist = [f(x) for x in raw_typlist]
def f(typ):
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata dtype [{0}]"
.format(typ))
dtyplist = [f(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self):
if self.format_version == 117:
b = 33
elif self.format_version == 118:
b = 129
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
if self.format_version == 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
def _get_variable_labels(self):
if self.format_version == 118:
vlblist = [self._decode(self.path_or_buf.read(321))
for i in range(self.nvar)]
elif self.format_version > 105:
vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
return vlblist
def _get_nobs(self):
if self.format_version == 118:
return struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
def _get_data_label(self):
if self.format_version == 118:
strlen = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._null_terminate(self.path_or_buf.read(81))
else:
return self._null_terminate(self.path_or_buf.read(32))
def _get_time_stamp(self):
if self.format_version == 118:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._null_terminate(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self):
if self.format_version == 117:
self.path_or_buf.read(8) # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version == 118:
return struct.unpack(self.byteorder + 'q',
self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self.data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # py2 string, py3 bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _setup_dtype(self):
"""Map between numpy and state dtypes"""
if self._dtype is not None:
return self._dtype
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder +
self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
return self._dtype
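# Sketch (illustrative) of the record dtype built above for a hypothetical
# little-endian file (byteorder '<') with one float64 column and one str10
# column:
#
#   typlist = ['d', 10]  ->  np.dtype([('s0', '<f8'), ('s1', 'S10')])
#
# Each chunk of the .dta data block can then be read with a single
# np.frombuffer call using this dtype.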
def _calcsize(self, fmt):
return (type(fmt) is int and fmt or
struct.calcsize(self.byteorder + fmt))
def _decode(self, s):
s = s.partition(b"\0")[0]
return s.decode('utf-8')
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding or self._default_encoding)
else:
null_byte = "\0"
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
def _read_value_labels(self):
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
return
if self._value_labels_read:
# Don't read twice
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b'</val': # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._null_terminate(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
off = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
val = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
if self.format_version <= 117:
self.value_label_dict[labname][val[i]] = (
self._null_terminate(txt[off[i]:end]))
else:
self.value_label_dict[labname][val[i]] = (
self._decode(txt[off[i]:end]))
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {'0': ''}
while True:
if self.path_or_buf.read(3) != b'GSO':
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
if self.byteorder == '<':
buf = buf[0:2] + buf[4:10]
else:
buf = buf[0:2] + buf[6:]
v_o = struct.unpack('Q', buf)[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
encoding = 'utf-8'
if self.format_version == 117:
encoding = self._encoding or self._default_encoding
va = va[0:-1].decode(encoding)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = va
# legacy
@Appender(_data_method_doc)
def data(self, **kwargs):
import warnings
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
@deprecate_kwarg(old_arg_name='index', new_arg_name='index_col')
def read(self, nrows=None, convert_dates=None,
convert_categoricals=None, index_col=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if index_col is None:
index_col = self._index_col
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (not self._value_labels_read):
self._can_read_value_labels = True
self._read_strls()
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist)
else:
data = DataFrame.from_records(data)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index_col is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(
self._null_terminate, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
ix = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], ix, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_items(data_formatted)
del data_formatted
self._do_convert_missing(data, convert_missing)
if convert_dates:
cols = np.where(lmap(lambda x: any(x.startswith(fmt)
for fmt in _date_formats),
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col],
self.fmtlist[i])
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(data,
self.value_label_dict,
self.lbllist,
order_categoricals)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_items(retyped_data)
if index_col is not None:
data = data.set_index(data.pop(index_col))
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing)
umissing, umissing_loc = np.unique(series[missing],
return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'Q':
continue
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError('columns contains duplicate entries')
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError('The following columns were not found in the '
'Stata data set: ' +
', '.join(list(unmatched)))
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist,
order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
try:
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
repeats = '\n' + '-' * 80 + '\n'.join(repeats)
msg = 'Value labels for column {0} are not unique. The ' \
'repeated labels are:\n{1}'.format(col, repeats)
raise ValueError(msg)
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_items(cat_converted_data)
return data
def data_label(self):
"""Returns data label of Stata file"""
return self.data_label
def variable_labels(self):
"""Returns variable labels as a dict, associating each variable name
with corresponding label
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self):
"""Returns a dict, associating each variable name a dict, associating
each value its corresponding label
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
Takes a char string and pads it with null bytes until it is `length` chars long
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format %s not implemented" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
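# Example (illustrative) of the key normalization above, assuming a frame with
# columns ['date', 'value']:
#
#   _maybe_convert_to_int_keys({'date': 'td'}, ['date', 'value'])  ->  {0: '%td'}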
def _dtype_to_stata_type(dtype, column):
"""
Converts dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - chr(251) - for int8 byte
252 - chr(252) - for int16 int
253 - chr(253) - for int32 long
254 - chr(254) - for float32 float
255 - chr(255) - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(_ensure_object(column.values))
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(_ensure_object(column.values))
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : str or buffer
String path of file-like object
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Only latin-1 and ascii are supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
dataset_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
super(StataWriter, self).__init__(encoding)
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = _stringify_path(fname)
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
else:
self._file.write(to_write)
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export '
'int64-based categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_items(data_formatted)
def _replace_nans(self, data):
# return data
"""Checks floating point data columns for nans, and replaces these with
the generic Stata missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _check_column_names(self, data):
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names = []
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
(c < '0' or c > '9') and c != '_':
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
# need to possibly encode the orig name if it's unicode
try:
orig_name = orig_name.encode('utf-8')
except:
pass
converted_names.append(
'{0} -> {1}'.format(orig_name, name))
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
import warnings
ws = invalid_name_doc.format('\n '.join(converted_names))
warnings.warn(ws, InvalidColumnName)
return data
def _prepare_pandas(self, data):
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = 'tc'
self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
self.varlist)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._file = _open_file_binary_write(
self._fname, self._encoding or self._default_encoding
)
try:
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
self._prepare_data()
self._write_data()
self._write_value_labels()
finally:
self._file.close()
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# GH #13856
# Avoid locale-specific month conversion
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (time_stamp.strftime("%d ") +
month_lookup[time_stamp.month] +
time_stamp.strftime(" %Y %H:%M"))
self._file.write(self._null_terminate(ts))
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (nvar + 1))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
for i in range(nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes('', 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError('Variable labels must be 80 characters '
'or fewer')
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError('Variable labels must contain only '
'characters that can be encoded in '
'Latin-1')
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert bad string data to '' and pad to correct length
dtype = []
data_cols = []
has_strings = False
for i, col in enumerate(data):
typ = ord(typlist[i])
if typ <= 244:
has_strings = True
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S%d' % typ
dtype.append(('c' + str(i), stype))
string = data[col].str.encode(self._encoding)
data_cols.append(string.values.astype(stype))
else:
dtype.append(('c' + str(i), data[col].dtype))
data_cols.append(data[col].values)
dtype = np.dtype(dtype)
if has_strings:
self.data = np.fromiter(zip(*data_cols), dtype=dtype)
else:
self.data = data.to_records(index=False)
def _write_data(self):
data = self.data
data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
|
py | b409cff5109f920da8ce03e9138034a7b788da87 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pathlib
import shutil
import subprocess
import sys
import nox # type: ignore
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
nox.sessions = [
"unit",
"cover",
"mypy",
"check_lower_bounds",
# exclude update_lower_bounds from default
"docs",
]
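# The sessions below are invoked through the nox CLI, e.g. "nox -s unit" to
# run only the unit tests or "nox -l" to list the available sessions; the
# Python versions actually exercised depend on the interpreters installed
# locally.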
@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
def unit(session):
"""Run the unit test suite."""
session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google/cloud/billing/budgets_v1/',
'--cov-config=.coveragerc',
'--cov-report=term',
'--cov-report=html',
os.path.join('tests', 'unit', ''.join(session.posargs))
)
@nox.session(python='3.7')
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python=['3.6', '3.7'])
def mypy(session):
"""Run the type checker."""
session.install('mypy', 'types-pkg_resources')
session.install('.')
session.run(
'mypy',
'--explicit-package-bases',
'google',
)
@nox.session
def update_lower_bounds(session):
"""Update lower bounds in constraints.txt to match setup.py"""
session.install('google-cloud-testutils')
session.install('.')
session.run(
'lower-bound-checker',
'update',
'--package-name',
PACKAGE_NAME,
'--constraints-file',
str(LOWER_BOUND_CONSTRAINTS_FILE),
)
@nox.session
def check_lower_bounds(session):
"""Check lower bounds in setup.py are reflected in constraints file"""
session.install('google-cloud-testutils')
session.install('.')
session.run(
'lower-bound-checker',
'check',
'--package-name',
PACKAGE_NAME,
'--constraints-file',
str(LOWER_BOUND_CONSTRAINTS_FILE),
)
@nox.session(python='3.6')
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx<3.0.0", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
|
py | b409d00917924424c03ff24c3634be1c4bc13822 | #====================== BEGIN GPL LICENSE BLOCK ======================
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#======================= END GPL LICENSE BLOCK ========================
# <pep8 compliant>
import bpy
import math
from itertools import count
from mathutils import Vector, Matrix
from ...utils.rig import is_rig_base_bone
from ...utils.bones import align_chain_x_axis, align_bone_x_axis, align_bone_y_axis, align_bone_z_axis
from ...utils.bones import align_bone_to_axis, flip_bone, put_bone, align_bone_orientation
from ...utils.naming import make_derived_name
from ...utils.misc import map_list, matrix_from_axis_roll, matrix_from_axis_pair
from ...utils.widgets import adjust_widget_transform_mesh
from ..widgets import create_foot_widget, create_ballsocket_widget
from ...base_rig import stage
from .limb_rigs import BaseLimbRig
DEG_360 = math.pi * 2
ALL_TRUE = (True, True, True)
class Rig(BaseLimbRig):
"""Human leg rig."""
def find_org_bones(self, bone):
bones = super().find_org_bones(bone)
for b in self.get_bone(bones.main[2]).bone.children:
if not b.use_connect and not b.children and not is_rig_base_bone(self.obj, b.name):
bones.heel = b.name
break
else:
self.raise_error("Heel bone not found.")
return bones
def initialize(self):
if len(self.bones.org.main) != 4:
self.raise_error("Input to rig type must be a chain of 4 bones.")
super().initialize()
self.pivot_type = self.params.foot_pivot_type
self.heel_euler_order = 'ZXY' if self.main_axis == 'x' else 'XZY'
assert self.pivot_type in {'ANKLE', 'TOE', 'ANKLE_TOE'}
def prepare_bones(self):
orgs = self.bones.org.main
foot = self.get_bone(orgs[2])
ik_y_axis = (0, 1, 0)
foot_y_axis = -self.vector_without_z(foot.y_axis)
foot_x = foot_y_axis.cross((0, 0, 1))
if self.params.rotation_axis == 'automatic':
align_chain_x_axis(self.obj, orgs[0:2])
# Orient foot and toe
align_bone_x_axis(self.obj, orgs[2], foot_x)
align_bone_x_axis(self.obj, orgs[3], -foot_x)
align_bone_x_axis(self.obj, self.bones.org.heel, Vector((0, 0, 1)))
elif self.params.auto_align_extremity:
if self.main_axis == 'x':
align_bone_x_axis(self.obj, orgs[2], foot_x)
align_bone_x_axis(self.obj, orgs[3], -foot_x)
else:
align_bone_z_axis(self.obj, orgs[2], foot_x)
align_bone_z_axis(self.obj, orgs[3], -foot_x)
else:
ik_y_axis = foot_y_axis
# Orientation of the IK main and roll control bones
self.ik_matrix = matrix_from_axis_roll(ik_y_axis, 0)
self.roll_matrix = matrix_from_axis_pair(ik_y_axis, foot_x, self.main_axis)
####################################################
# EXTRA BONES
#
# org:
# heel:
# Heel location marker bone
# ctrl:
# ik_spin:
# Toe spin control.
# heel:
# Foot roll control
# mch:
# heel[]:
# Chain of bones implementing foot roll.
#
####################################################
####################################################
# IK controls
def get_extra_ik_controls(self):
controls = super().get_extra_ik_controls() + [self.bones.ctrl.heel]
if self.pivot_type == 'ANKLE_TOE':
controls += [self.bones.ctrl.ik_spin]
return controls
def make_ik_control_bone(self, orgs):
name = self.copy_bone(orgs[2], make_derived_name(orgs[2], 'ctrl', '_ik'))
if self.pivot_type == 'TOE':
put_bone(self.obj, name, self.get_bone(name).tail, matrix=self.ik_matrix)
else:
put_bone(self.obj, name, None, matrix=self.ik_matrix)
return name
def build_ik_pivot(self, ik_name, **args):
heel_bone = self.get_bone(self.bones.org.heel)
args = {
'position': (heel_bone.head + heel_bone.tail)/2,
**args
}
return super().build_ik_pivot(ik_name, **args)
def register_switch_parents(self, pbuilder):
super().register_switch_parents(pbuilder)
pbuilder.register_parent(self, self.bones.org.main[2], exclude_self=True, tags={'limb_end'})
def make_ik_ctrl_widget(self, ctrl):
obj = create_foot_widget(self.obj, ctrl)
if self.pivot_type != 'TOE':
ctrl = self.get_bone(ctrl)
org = self.get_bone(self.bones.org.main[2])
offset = org.tail - (ctrl.custom_shape_transform or ctrl).head
adjust_widget_transform_mesh(obj, Matrix.Translation(offset))
####################################################
# IK pivot controls
def get_ik_pivot_output(self):
if self.pivot_type == 'ANKLE_TOE':
return self.bones.ctrl.ik_spin
else:
return self.get_ik_control_output()
@stage.generate_bones
def make_ik_pivot_controls(self):
if self.pivot_type == 'ANKLE_TOE':
self.bones.ctrl.ik_spin = self.make_ik_spin_bone(self.bones.org.main)
def make_ik_spin_bone(self, orgs):
name = self.copy_bone(orgs[2], make_derived_name(orgs[2], 'ctrl', '_spin_ik'))
put_bone(self.obj, name, self.get_bone(orgs[3]).head, matrix=self.ik_matrix, scale=0.5)
return name
@stage.parent_bones
def parent_ik_pivot_controls(self):
if self.pivot_type == 'ANKLE_TOE':
self.set_bone_parent(self.bones.ctrl.ik_spin, self.get_ik_control_output())
@stage.generate_widgets
def make_ik_spin_control_widget(self):
if self.pivot_type == 'ANKLE_TOE':
obj = create_ballsocket_widget(self.obj, self.bones.ctrl.ik_spin, size=0.75)
rotfix = Matrix.Rotation(math.pi/2, 4, self.main_axis.upper())
adjust_widget_transform_mesh(obj, rotfix, local=True)
####################################################
# Heel control
@stage.generate_bones
def make_heel_control_bone(self):
org = self.bones.org.main[2]
name = self.copy_bone(org, make_derived_name(org, 'ctrl', '_heel_ik'))
put_bone(self.obj, name, None, matrix=self.roll_matrix, scale=0.5)
self.bones.ctrl.heel = name
@stage.parent_bones
def parent_heel_control_bone(self):
self.set_bone_parent(self.bones.ctrl.heel, self.get_ik_pivot_output(), inherit_scale='AVERAGE')
@stage.configure_bones
def configure_heel_control_bone(self):
bone = self.get_bone(self.bones.ctrl.heel)
bone.lock_location = True, True, True
bone.rotation_mode = self.heel_euler_order
bone.lock_scale = True, True, True
@stage.generate_widgets
def generate_heel_control_widget(self):
create_ballsocket_widget(self.obj, self.bones.ctrl.heel)
####################################################
# Heel roll MCH
@stage.generate_bones
def make_roll_mch_chain(self):
orgs = self.bones.org.main
self.bones.mch.heel = self.make_roll_mch_bones(orgs[2], orgs[3], self.bones.org.heel)
def make_roll_mch_bones(self, foot, toe, heel):
foot_bone = self.get_bone(foot)
heel_bone = self.get_bone(heel)
heel_middle = (heel_bone.head + heel_bone.tail) / 2
result = self.copy_bone(foot, make_derived_name(foot, 'mch', '_roll'), scale=0.25)
roll1 = self.copy_bone(toe, make_derived_name(heel, 'mch', '_roll1'), scale=0.3)
roll2 = self.copy_bone(toe, make_derived_name(heel, 'mch', '_roll2'), scale=0.3)
rock1 = self.copy_bone(heel, make_derived_name(heel, 'mch', '_rock1'))
rock2 = self.copy_bone(heel, make_derived_name(heel, 'mch', '_rock2'))
put_bone(self.obj, roll1, None, matrix=self.roll_matrix)
put_bone(self.obj, roll2, heel_middle, matrix=self.roll_matrix)
put_bone(self.obj, rock1, heel_bone.tail, matrix=self.roll_matrix, scale=0.5)
put_bone(self.obj, rock2, heel_bone.head, matrix=self.roll_matrix, scale=0.5)
return [ rock2, rock1, roll2, roll1, result ]
@stage.parent_bones
def parent_roll_mch_chain(self):
chain = self.bones.mch.heel
self.set_bone_parent(chain[0], self.get_ik_pivot_output())
self.parent_bone_chain(chain)
@stage.rig_bones
def rig_roll_mch_chain(self):
self.rig_roll_mch_bones(self.bones.mch.heel, self.bones.ctrl.heel, self.bones.org.heel)
def rig_roll_mch_bones(self, chain, heel, org_heel):
rock2, rock1, roll2, roll1, result = chain
# This order is required for correct working of the constraints
for bone in chain:
self.get_bone(bone).rotation_mode = self.heel_euler_order
self.make_constraint(roll1, 'COPY_ROTATION', heel, space='POSE')
if self.main_axis == 'x':
self.make_constraint(roll2, 'COPY_ROTATION', heel, space='LOCAL', use_xyz=(True, False, False))
self.make_constraint(roll2, 'LIMIT_ROTATION', min_x=-DEG_360, space='LOCAL')
else:
self.make_constraint(roll2, 'COPY_ROTATION', heel, space='LOCAL', use_xyz=(False, False, True))
self.make_constraint(roll2, 'LIMIT_ROTATION', min_z=-DEG_360, space='LOCAL')
direction = self.get_main_axis(self.get_bone(heel)).dot(self.get_bone(org_heel).vector)
if direction < 0:
rock2, rock1 = rock1, rock2
self.make_constraint(
rock1, 'COPY_ROTATION', heel, space='LOCAL',
use_xyz=(False, True, False),
)
self.make_constraint(
rock2, 'COPY_ROTATION', heel, space='LOCAL',
use_xyz=(False, True, False),
)
self.make_constraint(rock1, 'LIMIT_ROTATION', max_y=DEG_360, space='LOCAL')
self.make_constraint(rock2, 'LIMIT_ROTATION', min_y=-DEG_360, space='LOCAL')
####################################################
# FK parents MCH chain
def parent_fk_parent_bone(self, i, parent_mch, prev_ctrl, org, prev_org):
if i == 3:
align_bone_orientation(self.obj, parent_mch, self.bones.mch.heel[2])
self.set_bone_parent(parent_mch, prev_org, use_connect=True)
else:
super().parent_fk_parent_bone(i, parent_mch, prev_ctrl, org, prev_org)
def rig_fk_parent_bone(self, i, parent_mch, org):
if i == 3:
con = self.make_constraint(parent_mch, 'COPY_TRANSFORMS', self.bones.mch.heel[2])
self.make_driver(con, 'influence', variables=[(self.prop_bone, 'IK_FK')], polynomial=[1.0, -1.0])
else:
super().rig_fk_parent_bone(i, parent_mch, org)
####################################################
# IK system MCH
def get_ik_input_bone(self):
return self.bones.mch.heel[-1]
@stage.parent_bones
def parent_ik_mch_chain(self):
super().parent_ik_mch_chain()
self.set_bone_parent(self.bones.mch.ik_target, self.bones.mch.heel[-1])
####################################################
# Settings
@classmethod
def add_parameters(self, params):
super().add_parameters(params)
items = [
('ANKLE', 'Ankle',
'The foot pivots at the ankle'),
('TOE', 'Toe',
'The foot pivots around the base of the toe'),
('ANKLE_TOE', 'Ankle and Toe',
'The foot pivots at the ankle, with extra toe pivot'),
]
params.foot_pivot_type = bpy.props.EnumProperty(
items = items,
name = "Foot Pivot",
default = 'ANKLE_TOE'
)
@classmethod
def parameters_ui(self, layout, params):
layout.prop(params, 'foot_pivot_type')
super().parameters_ui(layout, params, 'Foot')
def create_sample(obj):
# generated by rigify.utils.write_metarig
bpy.ops.object.mode_set(mode='EDIT')
arm = obj.data
bones = {}
bone = arm.edit_bones.new('thigh.L')
bone.head[:] = 0.0980, 0.0124, 1.0720
bone.tail[:] = 0.0980, -0.0286, 0.5372
bone.roll = 0.0000
bone.use_connect = False
bones['thigh.L'] = bone.name
bone = arm.edit_bones.new('shin.L')
bone.head[:] = 0.0980, -0.0286, 0.5372
bone.tail[:] = 0.0980, 0.0162, 0.0852
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['thigh.L']]
bones['shin.L'] = bone.name
bone = arm.edit_bones.new('foot.L')
bone.head[:] = 0.0980, 0.0162, 0.0852
bone.tail[:] = 0.0980, -0.0934, 0.0167
bone.roll = 0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['shin.L']]
bones['foot.L'] = bone.name
bone = arm.edit_bones.new('toe.L')
bone.head[:] = 0.0980, -0.0934, 0.0167
bone.tail[:] = 0.0980, -0.1606, 0.0167
bone.roll = -0.0000
bone.use_connect = True
bone.parent = arm.edit_bones[bones['foot.L']]
bones['toe.L'] = bone.name
bone = arm.edit_bones.new('heel.02.L')
bone.head[:] = 0.0600, 0.0459, 0.0000
bone.tail[:] = 0.1400, 0.0459, 0.0000
bone.roll = 0.0000
bone.use_connect = False
bone.parent = arm.edit_bones[bones['foot.L']]
bones['heel.02.L'] = bone.name
bpy.ops.object.mode_set(mode='OBJECT')
pbone = obj.pose.bones[bones['thigh.L']]
pbone.rigify_type = 'limbs.leg'
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
try:
pbone.rigify_parameters.separate_ik_layers = True
except AttributeError:
pass
try:
pbone.rigify_parameters.ik_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.separate_hose_layers = True
except AttributeError:
pass
try:
pbone.rigify_parameters.hose_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.limb_type = "leg"
except AttributeError:
pass
try:
pbone.rigify_parameters.fk_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
try:
pbone.rigify_parameters.tweak_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
except AttributeError:
pass
pbone = obj.pose.bones[bones['shin.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['foot.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['toe.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
pbone = obj.pose.bones[bones['heel.02.L']]
pbone.rigify_type = ''
pbone.lock_location = (False, False, False)
pbone.lock_rotation = (False, False, False)
pbone.lock_rotation_w = False
pbone.lock_scale = (False, False, False)
pbone.rotation_mode = 'QUATERNION'
bpy.ops.object.mode_set(mode='EDIT')
for bone in arm.edit_bones:
bone.select = False
bone.select_head = False
bone.select_tail = False
for b in bones:
bone = arm.edit_bones[bones[b]]
bone.select = True
bone.select_head = True
bone.select_tail = True
arm.edit_bones.active = bone
for eb in arm.edit_bones:
eb.layers = (False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)
arm.layers = (False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False)
return bones
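# Note: create_sample() builds the left-leg metarig this rig type expects: a
# connected thigh/shin/foot/toe chain plus the unconnected heel.02.L marker
# bone that find_org_bones() searches for among the foot bone's children.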
|
py | b409d0e1e3965dda8930d3a6fa61f23d24754bf7 | import sys
from main import process_playlist
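# Usage sketch: run this script with an optional playlist URL as the first
# command-line argument; if no URL is supplied, process_playlist() is called
# with None.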
try:
url = sys.argv[1]
except IndexError:
url = None
process_playlist(url)
|
py | b409d21befcc223719ab5ad8673d43ab8575c0f6 | import os
import pickle
import re
import shutil
from enum import Enum, unique
from pathlib import Path
from typing import Any, List, Optional, Union
from hyperstyle.src.python.review.common.file_system import Extension, ItemCondition
@unique
class AnalysisExtension(Enum):
XLSX = '.xlsx'
CSV = '.csv'
PICKLE = '.pickle'
JSON = '.json'
HTML = '.html'
TXT = '.txt'
# Image extensions
PNG = '.png'
JPG = '.jpg'
JPEG = '.jpeg'
WEBP = '.webp'
SVG = '.svg'
PDF = '.pdf'
EPS = '.eps'
# Not empty extensions are returned with a dot, for example, '.txt'
# If file has no extensions, an empty one ('') is returned
@classmethod
def get_extension_from_file(cls, file: Union[Path, str]) -> Union['AnalysisExtension', Extension]:
ext = os.path.splitext(file)[1]
try:
return AnalysisExtension(ext)
except ValueError:
return Extension(ext)
@classmethod
def get_image_extensions(cls) -> List[Union[Extension, 'AnalysisExtension']]:
return [
AnalysisExtension.PNG,
AnalysisExtension.JPG,
AnalysisExtension.JPEG,
AnalysisExtension.WEBP,
AnalysisExtension.SVG,
AnalysisExtension.PDF,
AnalysisExtension.EPS,
]
def get_restricted_extension(file_path: Optional[Union[str, Path]] = None,
available_values: List[Union[Extension, AnalysisExtension]]
= None) -> Union[Extension, AnalysisExtension]:
if file_path is None:
return Extension.EMPTY
ext = AnalysisExtension.get_extension_from_file(file_path)
if available_values is not None and ext not in available_values:
raise ValueError(f'Invalid extension. '
f'Available values are: {list(map(lambda e: e.value, available_values))}.')
return ext
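# Example (mirrors the checks above): get_restricted_extension('report.csv',
# [AnalysisExtension.CSV]) returns AnalysisExtension.CSV, while a path whose
# extension is not in available_values raises ValueError.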
def extension_file_condition(extension: Union[Extension, AnalysisExtension]) -> ItemCondition:
def has_this_extension(name: str) -> bool:
try:
return AnalysisExtension.get_extension_from_file(name) == extension
except ValueError:
return False
return has_this_extension
def match_condition(regex: str) -> ItemCondition:
def does_name_match(name: str) -> bool:
return re.fullmatch(regex, name) is not None
return does_name_match
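# Example (illustrative pattern only): match_condition(r'solution_\d+\.py')
# returns a predicate that accepts 'solution_1.py' but rejects 'solution_1.pyc',
# because re.fullmatch() must consume the entire name.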
def serialize_data_and_write_to_file(path: Path, data: Any) -> None:
os.makedirs(get_parent_folder(path), exist_ok=True)
with open(path, 'wb') as f:
p = pickle.Pickler(f)
p.dump(data)
def deserialize_data_from_file(path: Path) -> Any:
with open(path, 'rb') as f:
u = pickle.Unpickler(f)
return u.load()
# For getting name of the last folder or file
# For example, returns 'folder' for both 'path/data/folder' and 'path/data/folder/'
def get_name_from_path(path: Union[Path, str], with_extension: bool = True) -> str:
head, tail = os.path.split(path)
# Tail can be empty if '/' is at the end of the path
file_name = tail or os.path.basename(head)
if not with_extension:
file_name = os.path.splitext(file_name)[0]
elif AnalysisExtension.get_extension_from_file(file_name) == Extension.EMPTY:
raise ValueError('Cannot get file name with extension, because the passed path does not contain it')
return file_name
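# Example: get_name_from_path('path/data/file.txt') -> 'file.txt', and
# get_name_from_path('path/data/file.txt', with_extension=False) -> 'file'.
# Extension-less names (e.g. a trailing folder) only work with
# with_extension=False, since a missing extension raises ValueError.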
# File should contain the full path and its extension.
# Create all parents if necessary
def create_file(file_path: Union[str, Path], content: str):
file_path = Path(file_path)
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, 'w+') as f:
f.writelines(content)
yield Path(file_path)
def copy_file(source: Union[str, Path], destination: Union[str, Path]):
shutil.copy(source, destination)
def create_directory(path: str):
if not os.path.exists(path):
os.makedirs(path)
def copy_directory(source: Union[str, Path], destination: Union[str, Path], dirs_exist_ok: bool = True):
shutil.copytree(source, destination, dirs_exist_ok=dirs_exist_ok)
def get_parent_folder(path: Union[Path, str], to_add_slash: bool = False) -> Path:
path = remove_slash(str(path))
parent_folder = '/'.join(path.split('/')[:-1])
if to_add_slash:
parent_folder = add_slash(parent_folder)
return Path(parent_folder)
def add_slash(path: str) -> str:
if not path.endswith('/'):
path += '/'
return path
def remove_slash(path: str) -> str:
return path.rstrip('/')
def remove_directory(directory: Union[str, Path]) -> None:
if os.path.isdir(directory):
shutil.rmtree(directory, ignore_errors=True)
|
py | b409d2a71de60cb8101c19e57042503e4579eedc | import warnings
import pytest
from multipledispatch import Dispatcher
from multipledispatch.conflict import AmbiguityWarning
from ibis.backends.dask.trace import TwoLevelDispatcher
class A1:
pass
class A2(A1):
pass
class A3(A2):
pass
class B1:
pass
class B2(B1):
pass
class B3(B2):
pass
@pytest.fixture
def foo_dispatchers():
foo = TwoLevelDispatcher('foo', doc='Test dispatcher foo')
foo_m = Dispatcher('foo_m', doc='Control dispatcher foo_m')
@foo.register(A1, B1)
@foo_m.register(A1, B1)
def foo0(x, y):
return 0
@foo.register(A1, B2)
@foo_m.register(A1, B2)
def foo1(x, y):
return 1
@foo.register(A2, B1)
@foo_m.register(A2, B1)
def foo2(x, y):
return 2
@foo.register(A2, B2)
@foo_m.register(A2, B2)
def foo3(x, y):
return 3
@foo.register(
(A1, A2),
)
@foo_m.register(
(A1, A2),
)
def foo4(x):
return 4
return foo, foo_m
@pytest.fixture
def foo(foo_dispatchers):
return foo_dispatchers[0]
@pytest.fixture
def foo_m(foo_dispatchers):
return foo_dispatchers[1]
def test_cache(foo, mocker):
"""Test that cache is properly set after calling with args."""
spy = mocker.spy(foo, 'dispatch')
a1, b1 = A1(), B1()
assert (A1, B1) not in foo._cache
foo(a1, b1)
assert (A1, B1) in foo._cache
foo(a1, b1)
spy.assert_called_once_with(A1, B1)
def test_dispatch(foo, mocker):
"""Test that calling dispatcher with a signature that is registered
does not trigger a linear search through dispatch_iter."""
spy = mocker.spy(foo, 'dispatch_iter')
# This should not trigger a linear search
foo(A1(), B1())
assert not spy.called, (
"Calling dispatcher with registered signature should "
"not trigger linear search"
)
foo(A3(), B3())
spy.assert_called_once_with(A3, B3)
@pytest.mark.parametrize(
'args',
[
(A1(), B1()),
(A1(), B2()),
(A1(), B3()),
(A2(), B1()),
(A2(), B2()),
(A2(), B3()),
(A3(), B1()),
(A3(), B2()),
(A3(), B3()),
(A1(),),
(A2(),),
(A3(),),
],
)
def test_registered(foo_dispatchers, args):
foo, foo_m = foo_dispatchers
assert foo(*args) == foo_m(*args)
def test_ordering(foo, foo_m):
assert foo.ordering == foo_m.ordering
def test_funcs(foo, foo_m):
assert foo.funcs == foo_m.funcs
@pytest.mark.parametrize(
'args', [(B1(),), (B2(),), (A1(), A1()), (A1(), A2(), A3())]
)
def test_unregistered(foo, args):
with pytest.raises(
NotImplementedError, match="Could not find signature for foo.*"
):
foo(*args)
def test_ambiguities_warning():
bar = TwoLevelDispatcher('bar')
bar.register(A1, B1)(lambda a, b: 0)
bar.register(A1, B2)(lambda a, b: 1)
bar.register(A2, B1)(lambda a, b: 2)
with pytest.warns(AmbiguityWarning, match=".*Consider.*\n\n.*(A2, B2).*"):
bar.reorder()
def test_ambiguities_no_warning():
bar = TwoLevelDispatcher('bar')
bar.register(A1, B1)(lambda a, b: 0)
bar.register(A1, B2)(lambda a, b: 1)
bar.register(A2, B1)(lambda a, b: 2)
bar.register(A2, B2)(lambda a, b: 3)
with warnings.catch_warnings():
warnings.simplefilter("error")
bar.reorder()
|
py | b409d31e3c8902c6322f6b25b7ba0c7d6d8adf12 | import sys
import asyncio
from concurrent.futures import ThreadPoolExecutor
from pytest import fixture
from .mocks import MockClient, MockTransport
from oshino.augments.stats import SimpleMovingAverage
@fixture
def mock_client(request):
client = MockClient()
request.addfinalizer(client.on_stop)
return client
@fixture
def mock_transport():
return MockTransport()
@fixture
def broken_transport():
return MockTransport(broken=True)
@fixture(scope="session")
def executor(request):
loop = asyncio.get_event_loop()
print("Loop: {0}".format(loop))
ex = ThreadPoolExecutor(max_workers=3)
def on_stop():
ex.shutdown(wait=True)
loop.close()
print("done closing")
loop.set_default_executor(ex)
request.addfinalizer(on_stop)
return ex
@fixture
def moving_avg():
return SimpleMovingAverage({
"name": "test_moving_avg",
"key": "test",
"step": 3
})
|
py | b409d353ec1c0996987eee51e838f62ec0ffe057 | import Image
import ImageDraw
import time
from rgbmatrix import Adafruit_RGBmatrix
matrix = Adafruit_RGBmatrix(32, 2)
while True:
for n in range(1,12):
image = Image.open(str(n)+".gif")
image.load() # Must do this before SetImage() calls
matrix.SetImage(image.im.id)
time.sleep(0.08)
matrix.Clear()
#matrix.Fill(0x6F85FF) # Fill screen to sky color
#while True:
# for n in range(32, -image.size[0], -1): # Scroll R to L
# matrix.SetImage(image.im.id, n, 0)
# time.sleep(0.025)
|
py | b409d3784d0f62f3d8a9815a2b6b85ca7a70c4e2 | import os
from flask import Flask
from model import db
from views import api
from config import DB_LINK
def create_app():
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = DB_LINK
db.init_app(app)
app.register_blueprint(api)
return app
if __name__ == '__main__':
app = create_app()
app.run(port=int(os.environ.get('FLASK_PORT', 5001)))
|
py | b409d3b45de1e9e81bd0d3c0f69891b21ab9dc85 | #!/usr/bin/env python3
"""
Picocrypt v1.11
Dependencies: argon2-cffi, pycryptodome, reedsolo, tkinterdnd2
Copyright (c) Evan Su (https://evansu.cc)
Released under a GNU GPL v3 License
https://github.com/HACKERALERT/Picocrypt
~ In cryptography we trust ~
"""
# Imports
from tkinter import filedialog,messagebox
from threading import Thread
from datetime import datetime
from argon2.low_level import hash_secret_raw,Type
from Crypto.Cipher import ChaCha20_Poly1305
from Crypto.Hash import SHA3_512 as sha3_512
from secrets import compare_digest
from os import urandom,fsync,remove,system
from os.path import getsize,expanduser,isdir
from os.path import dirname,abspath,realpath
from os.path import join as pathJoin
from os.path import split as pathSplit
from tkinterdnd2 import TkinterDnD,DND_FILES
from zipfile import ZipFile
from pathlib import Path
from shutil import rmtree
import sys
import tkinter
import tkinter.ttk
import tkinter.scrolledtext
import webbrowser
import platform
from reedsolo import RSCodec,ReedSolomonError
# Tk/Tcl is a little barbaric, so I'm disabling
# high DPI so it doesn't scale bad and look horrible
try:
from ctypes import windll
windll.shcore.SetProcessDpiAwareness(0)
except:
pass
# Global variables and strings
rootDir = dirname(realpath(__file__))
inputFile = ""
outputFile = ""
outputPath = ""
password = ""
ad = ""
kept = False
working = False
gMode = None
headerRsc = False
allFiles = False
draggedFolderPaths = False
files = False
adString = "File metadata (used to store some text along with the file):"
compressingNotice = "Compressing files together..."
passwordNotice = "Error. The provided password is incorrect."
corruptedNotice = "Error. The input file is corrupted."
veryCorruptedNotice = "Error. The input file and header keys are badly corrupted."
modifiedNotice = "Error. The input file has been intentionally modified."
kCorruptedNotice = "The input file is corrupted, but the output has been kept."
kModifiedNotice = "The input file has been intentionally modified, but the output has been kept."
kVeryCorruptedNotice = "The input file is badly corrupted, but the output has been kept."
derivingNotice = "Deriving key (takes a few seconds)..."
keepNotice = "Keep decrypted output even if it's corrupted or modified"
eraseNotice = "Securely erase and delete original file"
erasingNotice = "Securely erasing original file(s)..."
overwriteNotice = "Output file already exists. Would you like to overwrite it?"
cancelNotice = "Exiting now will lead to broken output. Are you sure?"
rsNotice = "Prevent corruption using Reed-Solomon"
rscNotice = "Creating Reed-Solomon tables..."
unknownErrorNotice = "Unknown error occurred. Please try again."
# Create root Tk
tk = TkinterDnD.Tk()
tk.geometry("480x470")
tk.title("Picocrypt")
if platform.system()=="Darwin":
tk.configure(background="#edeced")
else:
tk.configure(background="#ffffff")
tk.resizable(0,0)
# Try setting window icon if included with Picocrypt
try:
favicon = tkinter.PhotoImage(file="./key.png")
tk.iconphoto(False,favicon)
except:
pass
# Some styling
s = tkinter.ttk.Style()
s.configure("TCheckbutton",background="#ffffff")
# Event when user drags file(s) and folder(s) into window
def inputSelected(draggedFile):
global inputFile,working,headerRsc,allFiles,draggedFolderPaths,files
resetUI()
dummy.focus()
status.config(cursor="")
status.bind("<Button-1>",lambda e:None)
# Use try to handle errors
try:
# Create list of input files
allFiles = []
files = []
draggedFolderPaths = []
suffix = ""
tmp = [i for i in draggedFile]
res = []
within = False
tmpName = ""
"""
The next for loop parses data returned by tkinterdnd2's file drop method.
When files and folders are dragged, the output (the 'draggedFile' parameter)
will contain the dropped files/folders and will look something like this:
A single file/folder: "C:\Foo\Bar.txt"
A single file/folder with a space in path: "{C:\Foo Bar\Lorem.txt}"
Multiple files/folders: "C:\Foo\Bar1.txt C:\Foo\Bar2.txt"
Multiple files/folders with spaces in paths:
- "C:\Foo\Bar1.txt {C:\Foo Bar\Lorem.txt}"
- "{C:\Foo Bar\Lorem.txt} C:\Foo\Bar1.txt"
- "{C:\Foo Bar\Lorem1.txt} {C:\Foo Bar\Lorem2.txt}"
"""
for i in tmp:
if i=="{":
within = True
elif i=="}":
within = False
res.append(tmpName)
tmpName = ""
else:
if i==" " and not within:
if tmpName!="":
res.append(tmpName)
tmpName = ""
else:
tmpName += i
if tmpName:
res.append(tmpName)
allFiles = []
files = []
# Check each thing dragged by user
for i in res:
# If there is a directory, recursively add all files to 'allFiles'
if isdir(i):
# Record the directory for secure wipe (if necessary)
draggedFolderPaths.append(i)
tmp = Path(i).rglob("*")
for p in tmp:
allFiles.append(abspath(p))
# Just a file, add it to files
else:
files.append(i)
# If there's only one file, set it as input file
if len(files)==1 and len(allFiles)==0:
inputFile = files[0]
files = []
else:
inputFile = ""
# Decide if encrypting or decrypting
if inputFile.endswith(".pcv"):
suffix = " (will decrypt)"
fin = open(inputFile,"rb")
# Read file metadata (a little complex)
tmp = fin.read(139)
reedsolo = False
if tmp[0]==43:
reedsolo = True
tmp = tmp[1:]
else:
tmp = tmp[:-1]
tmp = bytes(headerRsc.decode(tmp)[0])
tmp = tmp.replace(b"+",b"")
tmp = int(tmp.decode("utf-8"))
if not reedsolo:
fin.seek(138)
ad = fin.read(tmp)
try:
ad = bytes(headerRsc.decode(ad)[0])
except ReedSolomonError:
ad = b"Error decoding file metadata."
ad = ad.decode("utf-8")
fin.close()
# Insert the metadata into its text box
adArea["state"] = "normal"
adArea.delete("1.0",tkinter.END)
adArea.insert("1.0",ad)
adArea["state"] = "disabled"
# Update UI
adLabelString.set("File metadata (read only):")
keepBtn["state"] = "normal"
eraseBtn["state"] = "disabled"
rsBtn["state"] = "disabled"
cpasswordInput["state"] = "normal"
cpasswordInput.delete(0,"end")
cpasswordInput["state"] = "disabled"
cpasswordString.set("Confirm password (N/A):")
else:
# Update the UI
eraseBtn["state"] = "normal"
keepBtn["state"] = "disabled"
rsBtn["state"] = "normal"
adArea["state"] = "normal"
adArea.delete("1.0",tkinter.END)
suffix = " (will encrypt)"
adLabelString.set(adString)
cpasswordInput["state"] = "normal"
cpasswordInput.delete(0,"end")
cpasswordString.set("Confirm password:")
cpasswordLabel["state"] = "normal"
adLabel["state"] = "normal"
nFiles = len(files)
nFolders = len(draggedFolderPaths)
# Show selected file(s) and folder(s)
if (allFiles or files) and not draggedFolderPaths:
inputString.set(f"{nFiles} files selected (will encrypt).")
elif draggedFolderPaths and not files:
inputString.set(f"{nFolders} folder{'s' if nFolders!=1 else ''} selected (will encrypt).")
elif draggedFolderPaths and (allFiles or files):
inputString.set(
f"{nFiles} file{'s' if nFiles!=1 else ''} and "+
f"{nFolders} folder{'s' if nFolders!=1 else ''} selected (will encrypt)."
)
else:
inputString.set(inputFile.split("/")[-1]+suffix)
# Enable password box, etc.
passwordInput["state"] = "normal"
passwordInput.delete(0,"end")
passwordLabel["state"] = "normal"
startBtn["state"] = "normal"
statusString.set("Ready.")
status["state"] = "enabled"
progress["value"] = 0
# File decode error
except UnicodeDecodeError:
statusString.set(corruptedNotice)
progress["value"] = 100
# No file(s) selected, do nothing
except:
inputString.set("Drag and drop file(s) and folder(s) into this window.")
resetUI()
# Focus the dummy button to remove ugly borders
finally:
dummy.focus()
working = False
# Clears the selected files
def clearInputs():
dummy.focus()
resetUI()
# Allow drag and drop
def onDrop(e):
global working
if not working:
inputSelected(e.data)
tk.drop_target_register(DND_FILES)
tk.dnd_bind("<<Drop>>",onDrop)
# Label that displays selected input file
inputString = tkinter.StringVar(tk)
inputString.set("Drag and drop file(s) and folder(s) into this window.")
selectedInput = tkinter.ttk.Label(
tk,
textvariable=inputString
)
selectedInput.config(background="#ffffff")
selectedInput.place(x=17,y=16)
# Clear input files
clearInput = tkinter.ttk.Button(
tk,
text="Clear",
command=clearInputs
)
if platform.system()=="Darwin":
clearInput.place(x=398,y=14,width=64,height=24)
else:
clearInput.place(x=421,y=14,width=40,height=24)
separator = tkinter.ttk.Separator(
tk
)
separator.place(x=20,y=36,width=440)
# Label that prompts user to enter a password
passwordString = tkinter.StringVar(tk)
passwordString.set("Password:")
passwordLabel = tkinter.ttk.Label(
tk,
textvariable=passwordString
)
passwordLabel.place(x=17,y=46)
passwordLabel.config(background="#ffffff")
passwordLabel["state"] = "disabled"
# A frame to make password input fill width
passwordFrame = tkinter.Frame(
tk,
width=(445 if platform.system()=="Darwin" else 440),
height=22
)
passwordFrame.place(x=(17 if platform.system()=="Darwin" else 20),y=66)
passwordFrame.columnconfigure(0,weight=10)
passwordFrame.grid_propagate(False)
# Password input box
passwordInput = tkinter.ttk.Entry(
passwordFrame,
show="\u2022"
)
passwordInput.grid(sticky="nesw")
passwordInput["state"] = "disabled"
cpasswordString = tkinter.StringVar(tk)
cpasswordString.set("Confirm password:")
cpasswordLabel = tkinter.ttk.Label(
tk,
textvariable=cpasswordString
)
cpasswordLabel.place(x=17,y=96)
cpasswordLabel.config(background="#ffffff")
cpasswordLabel["state"] = "disabled"
# A frame to make confirm password input fill width
cpasswordFrame = tkinter.Frame(
tk,
width=(445 if platform.system()=="Darwin" else 440),
height=22
)
cpasswordFrame.place(x=(17 if platform.system()=="Darwin" else 20),y=116)
cpasswordFrame.columnconfigure(0,weight=10)
cpasswordFrame.grid_propagate(False)
# Confirm password input box
cpasswordInput = tkinter.ttk.Entry(
cpasswordFrame,
show="\u2022"
)
cpasswordInput.grid(sticky="nesw")
cpasswordInput["state"] = "disabled"
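# Layout of a .pcv volume as written/read by start() below. Every header field
# carries 128 extra Reed-Solomon parity bytes from headerRsc:
#   optional 1-byte "+" flag   - present when whole-file Reed-Solomon is enabled
#   138 bytes                  - metadata length (10 ASCII chars padded with "+")
#   len(metadata)+128 bytes    - the metadata (associated data)
#   192 bytes                  - SHA3-512 hash of the derived key (64+128)
#   192 bytes                  - SHA3-512 checksum of the ciphertext (64+128)
#   144 bytes                  - Poly1305 MAC tag (16+128)
#   144 bytes                  - Argon2 salt (16+128)
#   152 bytes                  - XChaCha20 nonce (24+128)
# followed by the ciphertext, processed in 1 MiB chunks (1104905 bytes per
# chunk once the optional whole-file Reed-Solomon encoding is applied).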
# Start the encryption/decryption process
def start():
global inputFile,outputFile,password,ad,kept
global working,gMode,headerRsc,allFiles,files
global dragFolderPath
dummy.focus()
reedsolo = False
chunkSize = 2**20
# Decide if encrypting or decrypting
if not inputFile.endswith(".pcv"):
mode = "encrypt"
gMode = "encrypt"
outputFile = inputFile+".pcv"
reedsolo = rs.get()==1
else:
mode = "decrypt"
gMode = "decrypt"
# Check if Reed-Solomon was enabled by checking for "+"
test = open(inputFile,"rb")
decider = test.read(1).decode("utf-8")
test.close()
if decider=="+":
reedsolo = True
# Decrypted output is just input file without the extension
outputFile = inputFile[:-4]
# Check if file already exists (getsize() throws error if file not found)
try:
getsize(outputFile)
force = messagebox.askyesno("Confirmation",overwriteNotice)
dummy.focus()
if force!=1:
return
except:
pass
# Disable inputs and buttons while encrypting/decrypting
disableAllInputs()
# Make sure passwords match
if passwordInput.get()!=cpasswordInput.get() and mode=="encrypt":
resetEncryptionUI()
statusString.set("Passwords don't match.")
return
# Set progress bar indeterminate
progress.config(mode="indeterminate")
progress.start(15)
statusString.set(rscNotice)
# Create Reed-Solomon object
if reedsolo:
# 13 parity bytes per 242-byte block, ~5% larger output file
rsc = RSCodec(13)
# Compress files together if user dragged multiple files
if allFiles or files:
statusString.set(compressingNotice)
tmp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if files:
zfPath = Path(files[0]).parent.absolute()
else:
zfPath = Path(dirname(allFiles[0])).parent.absolute()
zfOffset = len(str(zfPath))
zfName = pathJoin(zfPath,tmp+".zip")
zf = ZipFile(zfName,"w")
for i in allFiles:
zf.write(i,i[zfOffset:])
for i in files:
zf.write(i,pathSplit(i)[1])
zf.close()
inputFile = zfName
outputFile = zfName+".pcv"
outputPath = dirname(outputFile)
# Set and get some variables
working = True
headerBroken = False
reedsoloFixedCount = 0
reedsoloErrorCount = 0
dummy.focus()
password = passwordInput.get().encode("utf-8")
ad = adArea.get("1.0",tkinter.END).encode("utf-8")
wipe = erase.get()==1
# Open files
try:
fin = open(inputFile,"rb")
except:
resetEncryptionUI()
statusString.set("Folder is empty.")
return
if reedsolo and mode=="decrypt":
# Move pointer one forward
fin.read(1)
fout = open(outputFile,"wb+")
if reedsolo and mode=="encrypt":
# Signal that Reed-Solomon was enabled with a "+"
fout.write(b"+")
# Generate values for encryption if encrypting
if mode=="encrypt":
salt = urandom(16)
nonce = urandom(24)
# Reed-Solomon-encode metadata
ad = bytes(headerRsc.encode(ad))
# Write the metadata to output
tmp = str(len(ad)).encode("utf-8")
# Right-pad with "+"
while len(tmp)!=10:
tmp += b"+"
tmp = bytes(headerRsc.encode(tmp))
fout.write(tmp) # Length of metadata
fout.write(ad) # Metadata (associated data)
# Write zeros as placeholders, come back to write over it later.
# Note that 128 extra Reed-Solomon bytes are added
fout.write(b"0"*192) # SHA3-512 of encryption key
fout.write(b"0"*192) # CRC of file
fout.write(b"0"*144) # Poly1305 tag
# Reed-Solomon-encode salt and nonce
fout.write(bytes(headerRsc.encode(salt))) # Argon2 salt
fout.write(bytes(headerRsc.encode(nonce))) # ChaCha20 nonce
# If decrypting, read values from file
else:
# Move past metadata into actual data
tmp = fin.read(138)
if tmp[0]==43:
tmp = tmp[1:]+fin.read(1)
tmp = bytes(headerRsc.decode(tmp)[0])
tmp = tmp.replace(b"+",b"")
adlen = int(tmp.decode("utf-8"))
fin.read(int(adlen))
# Read the salt, nonce, etc.
cs = fin.read(192)
crccs = fin.read(192)
digest = fin.read(144)
salt = fin.read(144)
nonce = fin.read(152)
# Reed-Solomon-decode each value
try:
cs = bytes(headerRsc.decode(cs)[0])
except:
headerBroken = True
cs = cs[:64]
try:
crccs = bytes(headerRsc.decode(crccs)[0])
except:
headerBroken = True
crccs = crccs[:64]
try:
digest = bytes(headerRsc.decode(digest)[0])
except:
headerBroken = True
digest = digest[:16]
try:
salt = bytes(headerRsc.decode(salt)[0])
except:
headerBroken = True
salt = salt[:16]
try:
nonce = bytes(headerRsc.decode(nonce)[0])
except:
headerBroken = True
nonce = nonce[:24]
if headerBroken:
if keep.get()!=1:
statusString.set(veryCorruptedNotice)
fin.close()
fout.close()
remove(outputFile)
# Reset UI
resetDecryptionUI()
return
else:
kept = "badlyCorrupted"
# Show notice about key derivation
statusString.set(derivingNotice)
# Derive argon2id key
key = hash_secret_raw(
password,
salt,
time_cost=8, # 8 iterations
memory_cost=2**20, # 2^20 Kibibytes (1GiB)
parallelism=8, # 8 parallel threads
hash_len=32,
type=Type.ID
)
# Key deriving done, set progress bar determinate
progress.stop()
progress.config(mode="determinate")
progress["value"] = 0
# Compute hash of derived key
check = sha3_512.new()
check.update(key)
check = check.digest()
# If decrypting, check if key is correct
if mode=="decrypt":
# If key is incorrect...
if not compare_digest(check,cs):
if not headerBroken:
statusString.set(passwordNotice)
fin.close()
fout.close()
remove(outputFile)
# Reset UI
resetDecryptionUI()
return
# Create XChaCha20-Poly1305 object
cipher = ChaCha20_Poly1305.new(key=key,nonce=nonce)
# SHA3-512 checksum (named "crc" in this code) used to detect file corruption
crc = sha3_512.new()
# Amount of data encrypted/decrypted, total file size, starting time
done = 0
total = getsize(inputFile)
# If secure wipe enabled, create a wiper object
# Keep track of time because it flies...
startTime = datetime.now()
previousTime = datetime.now()
# Continuously read file in chunks of 1MB
while True:
if mode=="decrypt" and reedsolo:
# Read a chunk plus Reed-Solomon recovery bytes
piece = fin.read(1104905)
else:
piece = fin.read(chunkSize)
# If EOF
if not piece:
if mode=="encrypt":
# Get the cipher MAC tag (Poly1305)
digest = cipher.digest()
fout.flush()
fout.close()
fout = open(outputFile,"r+b")
# Compute the offset and seek to it (unshift "+")
rsOffset = 1 if reedsolo else 0
fout.seek(138+len(ad)+rsOffset)
# Write hash of key, CRC, and Poly1305 MAC tag
fout.write(bytes(headerRsc.encode(check)))
fout.write(bytes(headerRsc.encode(crc.digest())))
fout.write(bytes(headerRsc.encode(digest)))
else:
# If decrypting, verify CRC
crcdg = crc.digest()
if not compare_digest(crccs,crcdg):
# File is corrupted
statusString.set(corruptedNotice)
progress["value"] = 100
fin.close()
fout.close()
# If keep file not checked...
if keep.get()!=1:
remove(outputFile)
# Reset UI
resetDecryptionUI()
del fin,fout,cipher,key
return
else:
if not kept:
kept = "corrupted"
# Next, verify MAC tag (Poly1305)
try:
# Throws ValueError if incorrect Poly1305
cipher.verify(digest)
except:
if not reedsoloErrorCount and not headerBroken:
# File is modified
statusString.set(modifiedNotice)
progress["value"] = 100
fin.close()
fout.close()
# If keep file not checked...
if keep.get()!=1:
remove(outputFile)
# Reset UI
resetDecryptionUI()
del fin,fout,cipher,key
return
else:
if not kept:
kept = "modified"
break
# Encrypt/decrypt chunk and update CRC
if mode=="encrypt":
# Encrypt piece
data = cipher.encrypt(piece)
# Update checksum
crc.update(data)
if reedsolo:
# Encode using Reed-Solomon if user chooses
data = bytes(rsc.encode(data))
else:
# Basically encrypting but in reverse
if reedsolo:
try:
data,_,fixed = rsc.decode(piece)
except ReedSolomonError:
# File is really corrupted
if not reedsoloErrorCount:
if keep.get()!=1:
statusString.set(veryCorruptedNotice)
progress["value"] = 100
# If keep file not checked...
if keep.get()!=1:
fin.close()
fout.close()
remove(outputFile)
# Reset UI
resetDecryptionUI()
del fin,fout,cipher,key
return
else:
kept = "badlyCorrupted"
# Attempt to recover badly corrupted data
data = b""
piece = piece[:-13]
counter = 0
while True:
# Basically just strip the Reed-Solomon bytes
# and return the original non-encoded data
if counter<1104905:
data += piece[counter:counter+242]
counter += 255 # 255 bytes, 242 original
else:
break
fixed = bytearray()
reedsoloErrorCount += 1
data = bytes(data)
reedsoloFixedCount += len(fixed)
crc.update(data)
data = cipher.decrypt(data)
else:
crc.update(piece)
data = cipher.decrypt(piece)
# Calculate speed, ETA, etc.
elapsed = (datetime.now()-previousTime).total_seconds() or 0.0001
sinceStart = (datetime.now()-startTime).total_seconds() or 0.0001
previousTime = datetime.now()
percent = done*100/total
progress["value"] = percent
speed = (done/sinceStart)/10**6 or 0.0001
eta = round((total-done)/(speed*10**6))
# Seconds to minutes if seconds more than 59
if eta>=60:
# Set blank ETA if just starting
if sinceStart<0.5:
eta = "..."
else:
eta = f"{eta//60}m {eta%60}"
if isinstance(eta,int) or isinstance(eta,float):
if eta<0:
eta = 0
# Update status
info = f"{percent:.0f}% at {speed:.2f} MB/s (ETA: {eta}s)"
if reedsolo and mode=="decrypt" and reedsoloFixedCount:
tmp = "s" if reedsoloFixedCount!=1 else ""
info += f", fixed {reedsoloFixedCount} corrupted byte{tmp}"
if reedsolo and mode=="decrypt" and reedsoloErrorCount:
info += f", {reedsoloErrorCount} MB unrecoverable"
statusString.set(info)
# Increase done and write to output
done += 1104905 if (reedsolo and mode=="decrypt") else chunkSize
fout.write(data)
# Flush outputs, close files
if not kept:
fout.flush()
fsync(fout.fileno())
fout.close()
fin.close()
# Securely wipe files as necessary
if wipe:
if draggedFolderPaths:
for i in draggedFolderPaths:
secureWipe(i)
if files:
for i in range(len(files)):
                statusString.set(erasingNotice+f" ({i}/{len(files)})")
progress["value"] = i/len(files)
secureWipe(files[i])
secureWipe(inputFile)
# Secure wipe not enabled
else:
if allFiles:
# Remove temporary zip file if created
remove(inputFile)
# Show appropriate notice if file corrupted or modified
if not kept:
statusString.set(f"Completed. (Click here to show output)")
# Show Reed-Solomon stats if it fixed corrupted bytes
if mode=="decrypt" and reedsolo and reedsoloFixedCount:
statusString.set(
f"Completed with {reedsoloFixedCount}"+
f" bytes fixed. (Output: {output})"
)
else:
if kept=="modified":
statusString.set(kModifiedNotice)
elif kept=="corrupted":
statusString.set(kCorruptedNotice)
else:
statusString.set(kVeryCorruptedNotice)
status.config(cursor="hand2")
# A little hack since strings are immutable
output = "".join([i for i in outputFile])
# Bind the output file
if platform.system()=="Windows":
status.bind("<Button-1>",
lambda e:showOutput(output.replace("/","\\"))
)
else:
status.bind("<Button-1>",
lambda e:showOutput(output)
)
# Reset variables and UI states
resetUI()
status["state"] = "normal"
inputFile = ""
outputFile = ""
password = ""
ad = ""
kept = False
working = False
allFiles = False
dragFolderPath = False
# Wipe keys for safety
del fin,fout,cipher,key
# Wraps the start() function with error handling
def wrapper():
global working,gMode
# Try start() and handle errors
try:
start()
except:
# Reset UI accordingly
if gMode=="decrypt":
resetDecryptionUI()
else:
resetEncryptionUI()
statusString.set(unknownErrorNotice)
dummy.focus()
finally:
sys.exit(0)
# Encryption/decryption is done in a separate thread so the UI
# isn't blocked. This is a wrapper to spawn a thread and start it.
def startWorker():
thread = Thread(target=wrapper,daemon=True)
thread.start()
# Securely wipe file
def secureWipe(fin):
statusString.set(erasingNotice)
# Check platform, erase accordingly
if platform.system()=="Windows":
if isdir(fin):
paths = []
for i in Path(fin).rglob("*"):
if dirname(i) not in paths:
paths.append(dirname(i))
for i in range(len(paths)):
statusString.set(erasingNotice+f" ({i}/{len(paths)})")
progress["value"] = 100*i/len(paths)
system(f'cd "{paths[i]}" && "{rootDir}/sdelete64.exe" * -p 4 -s -nobanner')
system(f'cd "{rootDir}"')
rmtree(fin)
else:
statusString.set(erasingNotice)
progress["value"] = 100
system(f'sdelete64.exe "{fin}" -p 4 -nobanner')
elif platform.system()=="Darwin":
system(f'rm -rfP "{fin}"')
else:
system(f'shred -uz "{fin}" -n 4')
# Disable all inputs while encrypting/decrypting
def disableAllInputs():
passwordInput["state"] = "disabled"
cpasswordInput["state"] = "disabled"
adArea["state"] = "disabled"
startBtn["state"] = "disabled"
eraseBtn["state"] = "disabled"
keepBtn["state"] = "disabled"
rsBtn["state"] = "disabled"
# Reset UI to encryption state
def resetEncryptionUI():
global working
passwordInput["state"] = "normal"
cpasswordInput["state"] = "normal"
adArea["state"] = "normal"
startBtn["state"] = "normal"
eraseBtn["state"] = "normal"
rsBtn["state"] = "normal"
working = False
progress.stop()
progress.config(mode="determinate")
progress["value"] = 100
# Reset UI to decryption state
def resetDecryptionUI():
global working
passwordInput["state"] = "normal"
adArea["state"] = "normal"
startBtn["state"] = "normal"
keepBtn["state"] = "normal"
working = False
progress.stop()
progress.config(mode="determinate")
progress["value"] = 100
# Reset UI to original state (no file selected)
def resetUI():
adArea["state"] = "normal"
adArea.delete("1.0",tkinter.END)
adArea["state"] = "disabled"
adLabel["state"] = "disabled"
startBtn["state"] = "disabled"
passwordInput["state"] = "normal"
passwordInput.delete(0,"end")
passwordInput["state"] = "disabled"
passwordLabel["state"] = "disabled"
cpasswordInput["state"] = "normal"
cpasswordInput.delete(0,"end")
cpasswordInput["state"] = "disabled"
cpasswordString.set("Confirm password:")
cpasswordLabel["state"] = "disabled"
status["state"] = "disabled"
progress["value"] = 0
inputString.set("Drag and drop file(s) and folder(s) into this window.")
keepBtn["state"] = "normal"
keep.set(0)
keepBtn["state"] = "disabled"
eraseBtn["state"] = "normal"
erase.set(0)
eraseBtn["state"] = "disabled"
rs.set(0)
rsBtn["state"] = "disabled"
progress.stop()
progress.config(mode="determinate")
progress["value"] = 0
def showOutput(file):
if platform.system()=="Windows":
system(f'explorer /select,"{file}"')
elif platform.system()=="Darwin":
system(f'cd "{dirname(file)}"; open -R {pathSplit(file)[1]}')
system(f'cd "{rootDir}"')
else:
system(f'xdg-open "{dirname(file)}"')
# ad stands for "associated data"/metadata
adLabelString = tkinter.StringVar(tk)
adLabelString.set(adString)
adLabel = tkinter.ttk.Label(
tk,
textvariable=adLabelString
)
adLabel.place(x=17,y=148)
adLabel.config(background="#ffffff")
adLabel["state"] = "disabled"
# Frame so metadata text box can fill width
adFrame = tkinter.Frame(
tk,
width=440,
height=100
)
adFrame.place(x=20,y=168)
adFrame.columnconfigure(0,weight=10)
adFrame.grid_propagate(False)
# Metadata text box
adArea = tkinter.Text(
adFrame,
exportselection=0
)
adArea.config(font=("Consolas",12))
adArea.grid(sticky="we")
adArea["state"] = "disabled"
# Check box for keeping corrupted/modified output
keep = tkinter.IntVar()
keepBtn = tkinter.ttk.Checkbutton(
tk,
text=keepNotice,
variable=keep,
onvalue=1,
offvalue=0,
command=lambda:dummy.focus()
)
keepBtn.place(x=18,y=280)
keepBtn["state"] = "disabled"
# Check box for securely erasing original file
erase = tkinter.IntVar()
eraseBtn = tkinter.ttk.Checkbutton(
tk,
text=eraseNotice,
variable=erase,
onvalue=1,
offvalue=0,
command=lambda:dummy.focus()
)
eraseBtn.place(x=18,y=300)
eraseBtn["state"] = "disabled"
# Check box for Reed Solomon
rs = tkinter.IntVar()
rsBtn = tkinter.ttk.Checkbutton(
tk,
text=rsNotice,
variable=rs,
onvalue=1,
offvalue=0,
command=lambda:dummy.focus()
)
rsBtn.place(x=18,y=320)
rsBtn["state"] = "disabled"
# Frame so start button can fill width
startFrame = tkinter.Frame(
tk,
width=442,
height=24
)
startFrame.place(x=19,y=350)
startFrame.columnconfigure(0,weight=10)
startFrame.grid_propagate(False)
# Start button
startBtn = tkinter.ttk.Button(
startFrame,
text="Start",
command=startWorker
)
startBtn.grid(sticky="nesw")
startBtn["state"] = "disabled"
# Progress bar
progress = tkinter.ttk.Progressbar(
tk,
orient=tkinter.HORIZONTAL,
length=440,
mode="determinate"
)
progress.place(x=20,y=378)
# Status label
statusString = tkinter.StringVar(tk)
statusString.set("Ready.")
status = tkinter.ttk.Label(
tk,
textvariable=statusString
)
status.place(x=17,y=406)
status.config(background="#ffffff")
status["state"] = "disabled"
# Credits :)
hint = "Created by Evan Su. Click for details and source."
creditsString = tkinter.StringVar(tk)
creditsString.set(hint)
credits = tkinter.ttk.Label(
tk,
textvariable=creditsString,
cursor="hand2"
)
credits["state"] = "disabled"
credits.config(background="#ffffff")
credits.place(x=17,y=436)
source = "https://github.com/HACKERALERT/Picocrypt"
credits.bind("<Button-1>",lambda e:webbrowser.open(source))
# Version
versionString = tkinter.StringVar(tk)
versionString.set("v1.11")
version = tkinter.ttk.Label(
tk,
textvariable=versionString
)
version["state"] = "disabled"
version.config(background="#ffffff")
version.place(x=(420 if platform.system()=="Darwin" else 430),y=436)
# Dummy button to remove focus from other buttons
# and prevent ugly border highlighting
dummy = tkinter.ttk.Button(
tk
)
dummy.place(x=480,y=0)
# Function to create Reed-Solomon header codec
def createRsc():
global headerRsc
headerRsc = RSCodec(128)
sys.exit(0)
def prepare():
if platform.system()=="Windows":
system("sdelete64.exe /accepteula")
# Close window only if not encrypting or decrypting
def onClose():
global outputFile
if not working:
tk.destroy()
else:
force = messagebox.askyesno("Confirmation",cancelNotice)
if force:
tk.destroy()
# Main application loop
if __name__=="__main__":
# Create Reed-Solomon header codec
tmp = Thread(target=createRsc,daemon=True)
tmp.start()
# Prepare application
tmp = Thread(target=prepare,daemon=True)
tmp.start()
# Start tkinter
tk.protocol("WM_DELETE_WINDOW",onClose)
tk.mainloop()
sys.exit(0)
|
py | b409d3f1f07a17f6c3bd27ccd61672a42dcd77c2 | import discord
import os
TOKEN = os.environ['DISCORD_BOT_TOKEN']
# Create the client object needed to connect to Discord
client = discord.Client()
# Handler that runs when the bot starts up
@client.event
async def on_ready():
CHANNEL_ID = 798522685042851850
channel = client.get_channel(CHANNEL_ID)
    print('Hello. Observation unit Laplace has started.')
await channel.send('Hello.\nConfiguration code updates have been confirmed.\nRestarting.')
@client.event
async def on_member_join(member):
    # Send the welcome message to the joining member's guild system channel (assumed destination).
    channel = member.guild.system_channel
    if channel is not None:
        await channel.send(f"{member.display_name}'s join has been confirmed.\nWelcome to the Discord Crisis Countermeasures Organization.")
# Handler that runs when a message is received
@client.event
async def on_message(message):
    if message.author.bot:
        return
    if 'Laplace' in message.content:
        await message.channel.send(f'Hello,{message.author.display_name}.\nWhat do you need?')
    if message.content == 'Laplace、BAN':
        # TODO: BAN handling is not implemented yet.
        pass
    # Operator application system
    if message.guild is None and ('Apply' in message.content or 'apply' in message.content):
        await message.author.send('Ok.\nYour operator application has been confirmed.\nPlease set up a code name in order to register it in the database.')
# Start the bot and connect to the Discord server
client.run(TOKEN)
|
py | b409d4e51a71450ec778eeb1fda22e26d200bcf6 | #!/usr/bin/python
# coding: utf8
import geocoder
def test_entry_points():
geocoder.ip
geocoder.osm
geocoder.w3w
geocoder.bing
geocoder.here
geocoder.tgos
geocoder.baidu
geocoder.gaode
geocoder.yahoo
geocoder.mapbox
geocoder.google
geocoder.yandex
geocoder.tomtom
geocoder.arcgis
geocoder.ipinfo
geocoder.mapzen
geocoder.geonames
geocoder.mapquest
geocoder.timezone
geocoder.maxmind
geocoder.elevation
geocoder.freegeoip
geocoder.geolytica
geocoder.timezone
geocoder.opencage
geocoder.places
geocoder.canadapost
geocoder.tamu
geocoder.geocodefarm
geocoder.uscensus
def test_location():
g = geocoder.location('45.4215296, -75.6971931')
assert g.ok
g = geocoder.location({'lat': 45.4215296, 'lng': -75.6971931})
assert g.ok
g = geocoder.location([45.4215296, -75.6971931])
assert g.ok
|
py | b409d53390924934190376488e3f69385998af95 | import typer
from typer_example.tournament_menu import TournamentMenu
class MainMenu:
# typer_app = typer.Typer()
# typer_app.add_typer(TournamentMenu.typer_app, name="tournament")
def __init__(self):
self.tournament_handler = None
self.print_menu()
self.user_selection()
# self.typer_app()
def print_menu(self):
number = typer.style("1. ", bold=True)
typer.echo(number + "Tournois")
number = typer.style("2. ", bold=True)
typer.echo(number + "Gérer les joueurs")
def user_selection(self):
selection = typer.prompt("Entrez votre sélection: ")
typer.echo("\n")
if selection == "1":
self.open_tournament_menu()
else:
self.user_selection()
def open_tournament_menu(self):
TournamentMenu() |
py | b409d695a73e879915adfd077272ed3d9df89349 | import textwrap
import warnings
from pandas.core.indexes.base import (Index,
_new_Index,
ensure_index,
ensure_index_from_sequences,
InvalidIndexError) # noqa
from pandas.core.indexes.category import CategoricalIndex # noqa
from pandas.core.indexes.multi import MultiIndex # noqa
from pandas.core.indexes.interval import IntervalIndex # noqa
from pandas.core.indexes.numeric import (NumericIndex, Float64Index, # noqa
Int64Index, UInt64Index)
from pandas.core.indexes.range import RangeIndex # noqa
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
import pandas.core.common as com
from pandas._libs import lib, NaT
_sort_msg = textwrap.dedent("""\
Sorting because non-concatenation axis is not aligned. A future version
of pandas will change to not sort by default.
To accept the future behavior, pass 'sort=False'.
To retain the current behavior and silence the warning, pass 'sort=True'.
""")
# TODO: there are many places that rely on these private methods existing in
# pandas.core.index
__all__ = ['Index', 'MultiIndex', 'NumericIndex', 'Float64Index', 'Int64Index',
'CategoricalIndex', 'IntervalIndex', 'RangeIndex', 'UInt64Index',
'InvalidIndexError', 'TimedeltaIndex',
'PeriodIndex', 'DatetimeIndex',
'_new_Index', 'NaT',
'ensure_index', 'ensure_index_from_sequences',
'_get_combined_index',
'_get_objs_combined_axis', '_union_indexes',
'_get_consensus_names',
'_all_indexes_same']
def _get_objs_combined_axis(objs, intersect=False, axis=0, sort=True):
# Extract combined index: return intersection or union (depending on the
# value of "intersect") of indexes on given axis, or None if all objects
# lack indexes (e.g. they are numpy arrays)
obs_idxes = [obj._get_axis(axis) for obj in objs
if hasattr(obj, '_get_axis')]
if obs_idxes:
return _get_combined_index(obs_idxes, intersect=intersect, sort=sort)
def _get_combined_index(indexes, intersect=False, sort=False):
# TODO: handle index names!
indexes = com._get_distinct_objs(indexes)
if len(indexes) == 0:
index = Index([])
elif len(indexes) == 1:
index = indexes[0]
elif intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.intersection(other)
else:
index = _union_indexes(indexes, sort=sort)
index = ensure_index(index)
if sort:
try:
index = index.sort_values()
except TypeError:
pass
return index
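# Illustrative behaviour sketch (example values only, not part of the original file):
# combining Index([1, 2, 3]) and Index([2, 3, 4]) yields the intersection [2, 3]
# when intersect=True, and the union [1, 2, 3, 4] otherwise (sorted when sort=True).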
def _union_indexes(indexes, sort=True):
if len(indexes) == 0:
raise AssertionError('Must have at least 1 Index to union')
if len(indexes) == 1:
result = indexes[0]
if isinstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds):
def conv(i):
if isinstance(i, Index):
i = i.tolist()
return i
return Index(
lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort))
if kind == 'special':
result = indexes[0]
if hasattr(result, 'union_many'):
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == 'array':
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
if sort is None:
# TODO: remove once pd.concat sort default changes
warnings.warn(_sort_msg, FutureWarning, stacklevel=8)
sort = True
return _unique_indices(indexes)
name = _get_consensus_names(indexes)[0]
if name != index.name:
index = index._shallow_copy(name=name)
return index
else: # kind='list'
return _unique_indices(indexes)
def _sanitize_and_check(indexes):
kinds = list({type(index) for index in indexes})
if list in kinds:
if len(kinds) > 1:
indexes = [Index(com._try_sort(x))
if not isinstance(x, Index) else
x for x in indexes]
kinds.remove(list)
else:
return indexes, 'list'
if len(kinds) > 1 or Index not in kinds:
return indexes, 'special'
else:
return indexes, 'array'
def _get_consensus_names(indexes):
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = set(tuple(i.names) for i in indexes
if com._any_not_none(*i.names))
if len(consensus_names) == 1:
return list(list(consensus_names)[0])
return [None] * indexes[0].nlevels
def _all_indexes_same(indexes):
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
return False
return True
|
py | b409d69a5b7cce018ed72c28ec905e888f5c3d36 | import os
import time
import sys
import glob
from gym_idsgame.config.runner_mode import RunnerMode
from gym_idsgame.agents.training_agents.policy_gradient.pg_agent_config import PolicyGradientAgentConfig
from gym_idsgame.agents.dao.agent_type import AgentType
from gym_idsgame.config.client_config import ClientConfig
from gym_idsgame.config.hp_tuning_config import HpTuningConfig
from gym_idsgame.runnner import Runner
from experiments.util import plotting_util, util, hp_tuning
def get_script_path():
"""
:return: the script path
"""
return os.path.dirname(os.path.realpath(sys.argv[0]))
def default_output_dir() -> str:
"""
:return: the default output dir
"""
script_dir = get_script_path()
return script_dir
def default_config_path() -> str:
"""
:return: the default path to configuration file
"""
config_path = os.path.join(default_output_dir(), './config.json')
return config_path
def hp_tuning_config(client_config: ClientConfig) -> ClientConfig:
"""
Setup config for hparam tuning
:param client_config: the client config
:return: the updated client config
"""
client_config.hp_tuning = True
client_config.hp_tuning_config = HpTuningConfig(param_1="alpha", param_2="num_hidden_layers",
alpha=[0.000001, 0.00001, 0.0001, 0.001, 0.01],
num_hidden_layers=[1, 2, 4, 8, 16])
client_config.run_many = False
return client_config
def default_config() -> ClientConfig:
"""
:return: Default configuration for the experiment
"""
pg_agent_config = PolicyGradientAgentConfig(gamma=0.999, alpha_attacker=0.00001, epsilon=1, render=False, eval_sleep=0.9,
min_epsilon=0.01, eval_episodes=100, train_log_frequency=100,
epsilon_decay=0.9999, video=True, eval_log_frequency=1,
video_fps=5, video_dir=default_output_dir() + "/results/videos",
num_episodes=200001,
eval_render=False, gifs=True,
gif_dir=default_output_dir() + "/results/gifs",
eval_frequency=10000, attacker=False, defender=True, video_frequency=101,
save_dir=default_output_dir() + "/results/data",
checkpoint_freq=5000, input_dim_attacker=33, output_dim_defender=33, hidden_dim=64,
num_hidden_layers=1, batch_size=32,
gpu=False, tensorboard=True,
tensorboard_dir=default_output_dir() + "/results/tensorboard",
optimizer="Adam", lr_exp_decay=False, lr_decay_rate=0.999)
env_name = "idsgame-random_attack-v8"
client_config = ClientConfig(env_name=env_name, defender_type=AgentType.REINFORCE_AGENT.value,
mode=RunnerMode.TRAIN_DEFENDER.value,
pg_agent_config=pg_agent_config, output_dir=default_output_dir(),
title="RandomAttacker vs REINFORCE",
run_many=False, random_seeds=[0, 999, 299, 399, 499])
client_config = hp_tuning_config(client_config)
return client_config
def write_default_config(path:str = None) -> None:
"""
Writes the default configuration to a json file
:param path: the path to write the configuration to
:return: None
"""
if path is None:
path = default_config_path()
config = default_config()
util.write_config_file(config, path)
def plot_csv(config: ClientConfig, eval_csv_path:str, train_csv_path: str, random_seed : int = 0) -> None:
"""
Plot results from csv files
:param config: client config
:param eval_csv_path: path to the csv file with evaluation results
:param train_csv_path: path to the csv file with training results
:param random_seed: the random seed of the experiment
:return: None
"""
plotting_util.read_and_plot_results(train_csv_path, eval_csv_path, config.pg_agent_config.train_log_frequency,
config.pg_agent_config.eval_frequency, config.pg_agent_config.eval_log_frequency,
config.pg_agent_config.eval_episodes, config.output_dir, sim=False,
random_seed = random_seed)
def plot_average_results(experiment_title :str, config: ClientConfig, eval_csv_paths:list,
train_csv_paths: str) -> None:
"""
Plots average results after training with different seeds
:param experiment_title: title of the experiment
:param config: experiment config
:param eval_csv_paths: paths to csv files with evaluation data
:param train_csv_paths: path to csv files with training data
:return: None
"""
plotting_util.read_and_plot_average_results(experiment_title, train_csv_paths, eval_csv_paths,
config.pg_agent_config.train_log_frequency,
config.pg_agent_config.eval_frequency,
config.output_dir,
plot_attacker_loss = True, plot_defender_loss = False)
def run_experiment(configpath: str, random_seed: int, noconfig: bool):
"""
Runs one experiment and saves results and plots
    :param configpath: path to configfile
    :param random_seed: the random seed of the experiment
    :param noconfig: whether to override config
:return: (train_csv_path, eval_csv_path)
"""
if configpath is not None and not noconfig:
        if not os.path.exists(configpath):
            write_default_config()
        config = util.read_config(configpath)
else:
config = default_config()
time_str = str(time.time())
util.create_artefact_dirs(config.output_dir, random_seed)
logger = util.setup_logger("random_attack_vs_reinforce-v8", config.output_dir + "/results/logs/" +
str(random_seed) + "/",
time_str=time_str)
config.pg_agent_config.save_dir = default_output_dir() + "/results/data/" + str(random_seed) + "/"
config.pg_agent_config.video_dir= default_output_dir() + "/results/videos/" + str(random_seed) + "/"
config.pg_agent_config.gif_dir= default_output_dir() + "/results/gifs/" + str(random_seed) + "/"
config.pg_agent_config.tensorboard_dir = default_output_dir() + "/results/tensorboard/" \
+ str(random_seed) + "/"
config.logger = logger
config.pg_agent_config.logger = logger
config.pg_agent_config.random_seed = random_seed
config.random_seed = random_seed
config.pg_agent_config.to_csv(config.output_dir + "/results/hyperparameters/" + str(random_seed) + "/" + time_str + ".csv")
train_csv_path = ""
eval_csv_path = ""
if config.hp_tuning:
hp_tuning.hype_grid(config)
else:
train_result, eval_result = Runner.run(config)
if len(train_result.avg_episode_steps) > 0 and len(eval_result.avg_episode_steps) > 0:
train_csv_path = config.output_dir + "/results/data/" + str(random_seed) + "/" + time_str + "_train" + ".csv"
train_result.to_csv(train_csv_path)
eval_csv_path = config.output_dir + "/results/data/" + str(random_seed) + "/" + time_str + "_eval" + ".csv"
eval_result.to_csv(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path, random_seed)
return train_csv_path, eval_csv_path
# Program entrypoint
if __name__ == '__main__':
args = util.parse_args(default_config_path())
experiment_title = "random attack vs REINFORCE"
if args.configpath is not None and not args.noconfig:
if not os.path.exists(args.configpath):
write_default_config()
config = util.read_config(args.configpath)
else:
config = default_config()
if args.plotonly:
base_dir = default_output_dir() + "/results/data/"
train_csv_paths = []
eval_csv_paths = []
for seed in config.random_seeds:
train_csv_path = glob.glob(base_dir + str(seed) + "/*_train.csv")[0]
eval_csv_path = glob.glob(base_dir + str(seed) + "/*_eval.csv")[0]
train_csv_paths.append(train_csv_path)
eval_csv_paths.append(eval_csv_path)
plot_csv(config, eval_csv_path, train_csv_path, random_seed=seed)
try:
plot_average_results(experiment_title, config, eval_csv_paths, train_csv_paths)
except Exception as e:
print("Error when trying to plot summary: " + str(e))
else:
if not config.run_many:
run_experiment(args.configpath, 0, args.noconfig)
else:
train_csv_paths = []
eval_csv_paths = []
for seed in config.random_seeds:
train_csv_path, eval_csv_path = run_experiment(args.configpath, seed, args.noconfig)
train_csv_paths.append(train_csv_path)
eval_csv_paths.append(eval_csv_path)
try:
plot_average_results(experiment_title, config, eval_csv_paths, train_csv_paths)
except Exception as e:
print("Error when trying to plot summary: " + str(e))
|
py | b409d6fb40b7750be2bd4cfa1916a286e6494151 | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Handle the interface to GGRC models for all login methods.
"""
import flask
from ggrc import db
from ggrc.utils.log_event import log_event
def get_next_url(request, default_url):
"""Returns next url from requres or default url if it's not found."""
if 'next' in request.args:
next_url = request.args['next']
return next_url
return default_url
def commit_user_and_role(user):
"""Commits and flushes user and its role after the login."""
db_user, db_role = None, None
if hasattr(flask.g, "user_cache"):
db_user = flask.g.user_cache.get(user.email, None)
if hasattr(flask.g, "user_creator_roles_cache"):
db_role = flask.g.user_creator_roles_cache.get(user.email, None)
if db_user or db_role:
db.session.flush()
if db_user:
log_event(db.session, db_user, db_user.id, flush=False)
elif db_role:
# log_event of user includes event of role creation.
# if no user in cache, then it was created before but has no role.
log_event(db.session, db_role, user.id, flush=False)
db.session.commit()
|
py | b409d79a7948e83981d4b0618f69a20e45926a64 |
'''
haplotype_transcript_stats.py
Calculate stats on haplotype-specific transcripts shared
between samples, populations and super populations.
'''
import sys
import gzip
from utils import *
printScriptHeader()
if len(sys.argv) != 3:
print("Usage: python haplotype_transcript_stats.py <1kg_pop_info> <hap_txp_info> > stats.txt\n")
sys.exit(1)
pop_file = open(sys.argv[1], "r")
pop_data = {}
for line in pop_file:
line_split = line.split("\t")
assert(len(line_split) >= 4)
if line_split[0] != "sample":
pop_data[line_split[0]] = (line_split[1], line_split[2])
pop_file.close()
sys.stderr.write(str(len(pop_data)) + "\n")
hap_file = open(sys.argv[2], "r")
hap_data = []
hap_index = {}
for line in hap_file:
line_split = line.split("\t")
assert(len(line_split) == 5)
if line_split[0] != "Name":
hap_data.append([x.split("_")[2] for x in line_split[4].split(",")])
for hap in hap_data[-1]:
if hap in hap_index:
hap_index[hap].append(len(hap_data) - 1)
else:
hap_index[hap] = [len(hap_data) - 1]
hap_file.close()
sys.stderr.write(str(len(hap_data)) + "\n")
sys.stderr.write(str(len(hap_index)) + "\n")
print("name\tpop\tspop\ttotal\tnum_sam\tnum_pop\tnum_spop")
for pop_key, pop_value in pop_data.items():
num_haps = 0
num_sam = 0
num_pop = 0
num_spop = 0
for hap_idx in hap_index[pop_key]:
num_haps += 1
has_sam = False
has_pop = False
has_spop = False
for hap in hap_data[hap_idx]:
pop = pop_data[hap]
if not has_sam and hap != pop_key:
num_sam += 1
has_sam = True
if not has_pop and pop[0] != pop_value[0]:
num_pop += 1
has_pop = True
if not has_spop and pop[1] != pop_value[1]:
num_spop += 1
has_spop = True
if has_sam and has_pop and has_spop:
break
print(pop_key + "\t" + pop_value[0] + "\t" + pop_value[1] + "\t" + str(num_haps) + "\t" + str(num_sam) + "\t" + str(num_pop) + "\t" + str(num_spop))
sys.stdout.flush()
|
py | b409d7ecdd9f6261e15f12f59c6a8e26ecf00f55 | from hasta.http.main import Main
import socket
class Init:
def __init__(self):
self.main = Main()
def __call__(self, host , port):
self.data(host , port)
self.main(self.host , self.port , self.ip , self.protocol, socket)
    def data(self, host, port):
self.ip = socket.AF_INET # IPv4 .
self.protocol = socket.SOCK_STREAM # TCP .
self.host = host # Host IP address.
self.port = port # Host port.
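# Hedged usage sketch (host and port below are placeholder values):
#   Init()("127.0.0.1", 8080)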
|
py | b409d8db4579a242dc20f025cf95895c543df51e | # Copyright (c) Yugabyte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
import os
import sys
import re
import subprocess
import platform
import logging
from typing import List, Any, Set, Optional, Pattern
from yugabyte_db_thirdparty.os_detection import is_mac, is_linux
from yugabyte_db_thirdparty.custom_logging import log, fatal, heading
from yugabyte_db_thirdparty.util import YB_THIRDPARTY_DIR
from build_definitions import BUILD_TYPES
def compile_re_list(re_list: List[str]) -> Any:
return re.compile("|".join(re_list))
class LibTestBase:
"""
Verify correct library paths are used in installed dynamically-linked executables and
libraries.
"""
lib_re_list: List[str]
tool: str
def __init__(self) -> None:
self.tp_installed_dir = os.path.join(YB_THIRDPARTY_DIR, 'installed')
self.lib_re_list = []
def init_regex(self) -> None:
self.okay_paths = compile_re_list(self.lib_re_list)
def check_lib_deps(
self,
file_path: str,
cmdout: str,
additional_allowed_pattern: Optional[Pattern] = None) -> bool:
status = True
for line in cmdout.splitlines():
if (not self.okay_paths.match(line) and
not (additional_allowed_pattern and
additional_allowed_pattern.match(line))):
if status:
log(file_path + ":")
status = False
log("Bad path: %s", line)
return status
# overridden in platform specific classes
def good_libs(self, file_path: str) -> bool:
raise NotImplementedError()
def run(self) -> None:
self.init_regex()
heading("Scanning installed executables and libraries...")
test_pass = True
# files to examine are much reduced if we look only at bin and lib directories
dir_pattern = re.compile('^(lib|libcxx|[s]bin)$')
dirs = [os.path.join(self.tp_installed_dir, type) for type in BUILD_TYPES]
for installed_dir in dirs:
if not os.path.isdir(installed_dir):
logging.info("Directory %s does not exist, skipping", installed_dir)
continue
with os.scandir(installed_dir) as candidate_dirs:
for candidate in candidate_dirs:
if dir_pattern.match(candidate.name):
examine_path = os.path.join(installed_dir, candidate.name)
for dirpath, dirnames, files in os.walk(examine_path):
for file_name in files:
full_path = os.path.join(dirpath, file_name)
if os.path.islink(full_path):
continue
if not self.good_libs(full_path):
test_pass = False
if not test_pass:
fatal(f"Found problematic library dependencies, using tool: {self.tool}")
else:
log("No problems found with library dependencies.")
def add_allowed_shared_lib_paths(self, shared_lib_paths: Set[str]) -> None:
pass
class LibTestMac(LibTestBase):
def __init__(self) -> None:
super().__init__()
self.tool = "otool -L"
self.lib_re_list = ["^\t/usr/",
"^\t/System/Library/",
"^Archive ",
"^/",
"^\t@rpath",
"^\t@loader_path",
f"^\t{YB_THIRDPARTY_DIR}"]
def add_allowed_shared_lib_paths(self, shared_lib_paths: Set[str]) -> None:
# TODO: implement this on macOS for more precise checking of allowed dylib paths.
pass
def good_libs(self, file_path: str) -> bool:
libout = subprocess.check_output(['otool', '-L', file_path]).decode('utf-8')
if 'is not an object file' in libout:
return True
return self.check_lib_deps(file_path, libout)
class LibTestLinux(LibTestBase):
LIBCXX_NOT_FOUND = re.compile('^\tlibc[+][+][.]so[.][0-9]+ => not found')
def __init__(self) -> None:
super().__init__()
self.tool = "ldd"
self.lib_re_list = [
"^\tlinux-vdso",
"^\t/lib64/",
"^\t/opt/yb-build/brew/linuxbrew",
"^\tstatically linked",
"^\tnot a dynamic executable",
"ldd: warning: you do not have execution permission",
"^.* => /lib64/",
"^.* => /lib/",
"^.* => /usr/lib/x86_64-linux-gnu/",
"^.* => /opt/yb-build/brew/linuxbrew",
f"^.* => {re.escape(YB_THIRDPARTY_DIR)}"
]
def add_allowed_shared_lib_paths(self, shared_lib_paths: Set[str]) -> None:
for shared_lib_path in sorted(shared_lib_paths):
self.lib_re_list.append(f".* => {re.escape(shared_lib_path)}/")
def good_libs(self, file_path: str) -> bool:
try:
libout = subprocess.check_output(
['ldd', file_path],
stderr=subprocess.STDOUT, env={'LC_ALL': 'en_US.UTF-8'}).decode('utf-8')
except subprocess.CalledProcessError as ex:
if ex.returncode > 1:
log("Unexpected exit code %d from ldd, file %s", ex.returncode, file_path)
log(ex.stdout.decode('utf-8'))
return False
libout = ex.stdout.decode('utf-8')
file_basename = os.path.basename(file_path)
additional_allowed_pattern = None
if file_basename.startswith('libc++abi.so.'):
# One exception: libc++abi.so is not able to find libc++ because it loads the ASAN
# runtime library that is part of the LLVM distribution and does not have the correct
# rpath set. This happens on CentOS with our custom build of LLVM. We might be able to
            # fix this by specifying rpath correctly when building LLVM, but as of 12/2020 we just
# ignore this error here.
#
# $ ldd installed/asan/libcxx/lib/libc++abi.so.1.0
# linux-vdso.so.1 =>
# libclang_rt.asan-x86_64.so =>
# $LLVM_DIR/lib/clang/11.0.0/lib/linux/libclang_rt.asan-x86_64.so
# libclang_rt.ubsan_minimal-x86_64.so =>
# $LLVM_DIR/lib/clang/11.0.0/lib/linux/libclang_rt.ubsan_minimal-x86_64.so
# libunwind.so.1 => installed/common/lib/libunwind.so.1
# libdl.so.2 => /lib64/libdl.so.2
# libpthread.so.0 => /lib64/libpthread.so.0
# libm.so.6 => /lib64/libm.so.6
# libc.so.6 => /lib64/libc.so.6
# libc++.so.1 => not found <-- THIS IS OK
# libgcc_s.so.1 => /lib64/libgcc_s.so.1
# librt.so.1 => /lib64/librt.so.1
# /lib64/ld-linux-x86-64.so.2
#
# Run
# LD_DEBUG=all ldd installed/asan/libcxx/lib/libc++abi.so.1.0
# and notice the following line:
#
# file=libc++.so.1 [0];
# needed by $LLVM_DIR/lib/clang/11.0.0/lib/linux/libclang_rt.asan-x86_64.so
#
# Also running
# ldd $LLVM_DIR/lib/clang/11.0.0/lib/linux/libclang_rt.asan-x86_64.so
#
# reports "libc++.so.1 => not found".
additional_allowed_pattern = self.LIBCXX_NOT_FOUND
return self.check_lib_deps(file_path, libout, additional_allowed_pattern)
def get_lib_tester() -> LibTestBase:
if is_mac():
return LibTestMac()
if is_linux():
return LibTestLinux()
fatal(f"Unsupported platform: {platform.system()}")
|
py | b409d94e0e1e10f30355d85c61743e582f4f6358 | import tensorflow as tf
from text import symbols
def create_hparams(hparams_string=None, verbose=False):
"""Create model hyperparameters. Parse nondefault from given string."""
hparams = tf.contrib.training.HParams(
################################
# Experiment Parameters #
################################
epochs=500,
iters_per_checkpoint=1000,
seed=1234,
dynamic_loss_scaling=True,
fp16_run=False,
distributed_run=False,
dist_backend="nccl",
dist_url="tcp://localhost:54321",
cudnn_enabled=True,
cudnn_benchmark=False,
ignore_layers=['embedding.weight'],
################################
# Data Parameters #
################################
load_mel_from_disk=False,
training_files='filelists/vivos_audio_text_train_filelist.txt',
validation_files='filelists/vivos_audio_text_val_filelist.txt',
#text_cleaners=['english_cleaners'],
text_cleaners=['basic_cleaners'],
################################
# Audio Parameters #
################################
max_wav_value=32768.0,
# sampling_rate=22050,
sampling_rate=16000,
filter_length=1024,
hop_length=256,
win_length=1024,
n_mel_channels=80,
mel_fmin=0.0,
mel_fmax=8000.0,
################################
# Model Parameters #
################################
n_symbols=len(symbols),
symbols_embedding_dim=512,
# Encoder parameters
encoder_kernel_size=5,
encoder_n_convolutions=3,
encoder_embedding_dim=512,
# Decoder parameters
n_frames_per_step=1, # currently only 1 is supported
decoder_rnn_dim=1024,
prenet_dim=256,
max_decoder_steps=1000,
gate_threshold=0.5,
p_attention_dropout=0.1,
p_decoder_dropout=0.1,
# Attention parameters
attention_rnn_dim=1024,
attention_dim=128,
# Location Layer parameters
attention_location_n_filters=32,
attention_location_kernel_size=31,
# Mel-post processing network parameters
postnet_embedding_dim=512,
postnet_kernel_size=5,
postnet_n_convolutions=5,
################################
# Optimization Hyperparameters #
################################
use_saved_learning_rate=False,
learning_rate=1e-3,
weight_decay=1e-6,
grad_clip_thresh=1.0,
batch_size=64,
mask_padding=True # set model's padded outputs to padded values
)
if hparams_string:
tf.logging.info('Parsing command line hparams: %s', hparams_string)
hparams.parse(hparams_string)
if verbose:
tf.logging.info('Final parsed hparams: %s', hparams.values())
return hparams
|
py | b409d94fba0af5a1f7637376cef701ec4867b2ac | #!/usr/bin/env python3
from .memoize import cached
from .linear_cg import linear_cg
from .stochastic_lq import StochasticLQ
from . import cholesky
from . import eig
from . import fft
from . import grid
from . import interpolation
from . import lanczos
from . import pivoted_cholesky
from . import sparse
def prod(items):
"""
"""
if len(items):
res = items[0]
for item in items[1:]:
res = res * item
return res
else:
return 1
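# Illustrative examples (for reference only):
#   prod([2, 3, 4]) -> 24
#   prod([]) -> 1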
__all__ = [
"cached",
"linear_cg",
"StochasticLQ",
"cholesky",
"eig",
"fft",
"grid",
"interpolation",
"lanczos",
"pivoted_cholesky",
"sparse",
]
|
py | b409db9f31b61eacd71730bca665b75f012bc16e | import copy
import torchvision.models as models
from ptsemseg.models.fcn import fcn8s, fcn16s, fcn32s
from ptsemseg.models.segnet import segnet
from ptsemseg.models.unet import unet
from ptsemseg.models.pspnet import pspnet
from ptsemseg.models.icnet import icnet
from ptsemseg.models.icnet_is import icnet_is
from ptsemseg.models.icnet_is_wp import icnet_is_wp
from ptsemseg.models.linknet import linknet
from ptsemseg.models.frrn import frrn
def get_model(model_dict, n_classes, version=None):
name = model_dict["arch"]
model = _get_model_instance(name)
param_dict = copy.deepcopy(model_dict)
param_dict.pop("arch")
if name in ["frrnA", "frrnB"]:
model = model(n_classes, **param_dict)
elif name in ["fcn32s", "fcn16s", "fcn8s"]:
model = model(n_classes=n_classes, **param_dict)
vgg16 = models.vgg16(pretrained=True)
model.init_vgg16_params(vgg16)
elif name == "segnet":
model = model(n_classes=n_classes, **param_dict)
vgg16 = models.vgg16(pretrained=True)
model.init_vgg16_params(vgg16)
elif name == "unet":
model = model(n_classes=n_classes, **param_dict)
elif name == "pspnet":
model = model(n_classes=n_classes, **param_dict)
elif name == "icnet":
model = model(n_classes=n_classes, **param_dict)
elif name == "icnetBN":
model = model(n_classes=n_classes, **param_dict)
elif name == "icnet_is":
model = model(n_classes=n_classes, **param_dict)
elif name == "icnet_is_wp":
model = model(n_classes=n_classes, **param_dict)
else:
model = model(n_classes=n_classes, **param_dict)
return model
def _get_model_instance(name):
try:
return {
"fcn32s": fcn32s,
"fcn8s": fcn8s,
"fcn16s": fcn16s,
"unet": unet,
"segnet": segnet,
"pspnet": pspnet,
"icnet": icnet,
"icnet_is": icnet_is,
"icnet_is_wp": icnet_is_wp,
"icnetBN": icnet,
"linknet": linknet,
"frrnA": frrn,
"frrnB": frrn,
}[name]
    except KeyError:
        raise ValueError("Model {} not available".format(name))
|
py | b409dbd679f772499394ea8cbb7a300405ad6b3b | # SPDX-FileCopyrightText: 2020 by Bryan Siepert, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
import time
import board
import busio
import adafruit_scd30
# SCD-30 has tempremental I2C with clock stretching, and delays
# It's best to start using I2C clock slower and then you can increase it
# until the sensor stops responding (NAK fails, etc)
i2c = busio.I2C(board.SCL, board.SDA, frequency=1000) # for FT232H, use 1KHz
scd = adafruit_scd30.SCD30(i2c)
while True:
# since the measurement interval is long (2+ seconds) we check for new data before reading
# the values, to ensure current readings.
if scd.data_available:
print("Data Available!")
print("CO2: %d PPM" % scd.CO2)
print("Temperature: %0.2f degrees C" % scd.temperature)
print("Humidity: %0.2f %% rH" % scd.relative_humidity)
print("")
print("Waiting for new data...")
print("")
time.sleep(0.5)
|
py | b409dc96bc79de99ec92c6971ab20917097368ca | # Copyright 2019 Palo Alto Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.script_logger import Logger
from lib.utils import CustomImage
CONFIG_FILE = "config.yaml"
def main():
# Custom Image library Initialization
lib = CustomImage(logger, CONFIG_FILE)
# Create a base Instance
lib.cloud_client.create_instance()
try:
# Connect to FW Instance
lib.connect_to_vmseries()
# License the FW
lib.license_firewall()
# Verify System
lib.verify_system()
# Upgrade Content
lib.upgrade_content()
# Upgrade Anti-virus
lib.upgrade_antivirus()
# Upgrade Global-Protect Clientless VPN
lib.upgrade_gp_cvpn()
# Upgrade Wildfire
lib.upgrade_wildfire()
# Upgrade VM Series Plugin
lib.upgrade_plugin()
# Upgrade PanOS
lib.upgrade_panos()
# Verify Upgrades
lib.verify_upgrades(when="before")
# Perform Private Data Reset
lib.private_data_reset()
# Verify Upgrades after Private Data Reset
lib.verify_upgrades(when="after")
# Close connection to the Firewall
lib.handler.close()
# Create Custom Image
lib.create_custom_image()
# # Cleanup
logger.info('*** Terminating Base Instance ***')
lib.cloud_client.terminate_instance()
logger.info('*** Termination Complete ***')
except Exception as e:
# Failed
logger.error(f'*** Failed to Create Custom Image ***')
logger.error(f'TRACEBACK: {str(e)}')
# Terminating created Instance
logger.info('*** Terminating Base Instance ***')
lib.cloud_client.terminate_instance()
logger.info('*** Termination Complete ***')
if __name__ == '__main__':
# Setup Logger
logger = Logger(console=True, level='DEBUG')
# Create Custom Image
main()
|
py | b409dcbc50ab056ad8ad0740fab756c3c697b7af | # SMS verification code validity period (in seconds)
SMS_CODE_REDIS_EXPIRES = 300
# SMS verification code template ID
SMS_CODE_TEMP_ID = 1 |
py | b409dccbd188bac4f53976c88c83eb7d2bc60eb1 | # *************************************
# |docname| - Core tables and functions
# *************************************
import datetime
import jwt
import os
import random
import re
from gluon import current
import logging
logger = logging.getLogger(settings.logger)
logger.setLevel(settings.log_level)
## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.
## register with janrain.com, write your domain:api_key in private/janrain.key
# from gluon.contrib.login_methods.rpx_account import use_janrain
# use_janrain(auth,filename='private/janrain.key')
try:
from gluon.contrib.login_methods.janrain_account import RPXAccount
except ImportError:
print("Warning you should upgrade to a newer web2py for better janrain support")
from gluon.contrib.login_methods.rpx_account import RPXAccount # noqa: F401
from gluon.contrib.login_methods.extended_login_form import ( # noqa: F401
ExtendedLoginForm,
)
from gluon.tools import Auth, Crud, Service, PluginManager, prettydate # noqa: F401
#########################################################################
## This scaffolding model makes your app work on Google App Engine too
## File is released under public domain and you can use without limitations
#########################################################################
## if SSL/HTTPS is properly configured and you want all HTTP requests to
## be redirected to HTTPS, uncomment the line below:
# request.requires_https()
# If the bookserver owns (performs migrations) on this table, then don't do web2py migrations on it.
def bookserver_owned(table_name):
return False
table_migrate_prefix = "runestone_"
if not request.env.web2py_runtime_gae:
## if NOT running on Google App Engine use SQLite or other DB
if os.environ.get("WEB2PY_CONFIG", "") == "test":
db = DAL(
settings.database_uri,
migrate=False,
pool_size=5,
adapter_args=dict(logfile="test_runestone_migrate.log"),
migrate_enabled=(
os.environ.get("WEB2PY_MIGRATE", "Yes") in ["Yes", "Fake"]
),
)
table_migrate_prefix = "test_runestone_"
else:
# WEB2PY_MIGRATE is either "Yes", "No", "Fake", or missing
db = DAL(
settings.database_uri,
pool_size=10,
fake_migrate_all=(os.environ.get("WEB2PY_MIGRATE", "Yes") == "Fake"),
migrate=False,
migrate_enabled=(
os.environ.get("WEB2PY_MIGRATE", "Yes") in ["Yes", "Fake"]
),
)
session.connect(
request,
response,
db,
masterapp=None,
migrate=table_migrate_prefix + "web2py_sessions.table",
)
else:
## connect to Google BigTable (optional 'google:datastore://namespace')
db = DAL("google:datastore")
## store sessions and tickets there
session.connect(request, response, db=db)
## or store session in Memcache, Redis, etc.
## from gluon.contrib.memdb import MEMDB
## from google.appengine.api.memcache import Client
## session.connect(request, response, db = MEMDB(Client()))
# For LTI you may want to open Runestone in an iframe. This is tricky
# and can run afoul of browser settings that disable 3rd party tracking.
# However this seems to do the trick at least from the test tool at
# https://lti.tools/saltire/tc - More testing with Canvas and Company
# is required. The Content Request launch also works in an iframe.
if "https" in settings.server_type:
session.secure()
if settings.lti_iframes is True:
session.samesite("None")
# This seems like it should allow us to share the session cookie across subdomains.
# and seems to work for every browser except for Safari
# I'm not sure what the issue is... So I'm commenting it out until I understand what is gong on.
# if settings.session_domain and "session_id_runestone" in response.cookies:
# response.cookies["session_id_runestone"]["domain"] = settings.session_domain
## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ["*"] if request.is_local else []
## (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - old style crud actions
## (more options discussed in gluon/tools.py)
#########################################################################
auth = Auth(db, hmac_key=Auth.get_or_create_key())
crud, service, plugins = Crud(db), Service(), PluginManager()
# Make the settings and database available in modules.
current.db = db
current.settings = settings
current.auth = auth
if settings.enable_captchas:
## Enable captcha's :-(
from gluon.tools import Recaptcha
auth.settings.captcha = Recaptcha(
request,
"6Lfb_t4SAAAAAB9pG_o1CwrMB40YPsdBsD8GsvlD",
"6Lfb_t4SAAAAAGvAHwmkahQ6s44478AL5Cf-fI-x",
options="theme:'blackglass'",
)
auth.settings.login_captcha = False
auth.settings.retrieve_password_captcha = False
auth.settings.retrieve_username_captcha = False
# Set up for `two-factor authentication <http://web2py.com/books/default/chapter/29/09/access-control#Two-step-verification>`_.
# auth.settings.auth_two_factor_enabled = True
# auth.settings.two_factor_methods = [lambda user, auth_two_factor: 'password_here']
if os.environ.get("WEB2PY_CONFIG", "") == "production":
SELECT_CACHE = dict(cache=(cache.ram, 3600), cacheable=True)
COUNT_CACHE = dict(cache=(cache.ram, 3600))
else:
SELECT_CACHE = {}
COUNT_CACHE = {}
# .. _courses table:
#
# ``courses`` table
# =================
## create all tables needed by auth if not custom tables
db.define_table(
"courses",
Field("course_name", "string", unique=True),
Field("term_start_date", "date"),
Field("institution", "string"),
Field("base_course", "string"),
Field("python3", type="boolean", default=True),
Field("login_required", type="boolean", default=True),
Field("allow_pairs", type="boolean", default=False),
Field("student_price", type="integer"),
Field("downloads_enabled", type="boolean", default=False),
Field("courselevel", type="string"),
Field("new_server", type="boolean", default=False),
migrate=bookserver_owned("courses"),
)
# Provide a common query. Pass ``db.courses.ALL`` to retrieve all fields; otherwise, only the ``course_name`` and ``base_course`` are selected.
def get_course_row(*args, **kwargs):
if not args:
args = db.courses.course_name, db.courses.base_course
return db(db.courses.id == auth.user.course_id).select(*args).first()
# Make this available to modules.
current.get_course_row = get_course_row
# Provide the correct URL to a book, based on if it's statically or dynamically served. This function return URL(*args) and provides the correct controller/function based on the type of the current course (static vs dynamic).
def get_course_url(*args):
course = db(db.courses.id == auth.user.course_id).select().first()
args = tuple(x for x in args if x != "")
if course:
return URL(
a=settings.bks,
c="books",
f="published",
args=(course.course_name,) + args,
)
else:
return URL(c="default")
########################################
def getCourseNameFromId(courseid):
"""used to compute auth.user.course_name field"""
q = db.courses.id == courseid
row = db(q).select().first()
return row.course_name if row else ""
def getCourseOrigin(base_course):
res = (
db(
(db.course_attributes.course_id == db.courses.id)
& (db.courses.course_name == base_course)
& (db.course_attributes.attr == "markup_system")
)
.select(db.course_attributes.value, **SELECT_CACHE)
.first()
)
return res
def getCourseAttributesDict(course_id):
attributes = db(db.course_attributes.course_id == course_id).select(**SELECT_CACHE)
attrdict = {row.attr: row.value for row in attributes}
return attrdict
def verifyInstructorStatus(course, instructor):
"""
Make sure that the instructor specified is actually an instructor for the
given course.
"""
res = False
if type(course) == str:
course = (
db(db.courses.course_name == course)
.select(db.courses.id, **SELECT_CACHE)
.first()
)
try:
res = (
db(
(db.course_instructor.course == course)
& (db.course_instructor.instructor == instructor)
).count(**COUNT_CACHE)
> 0
)
except Exception as e:
logger.error(f"VIS -- {e}")
db.rollback()
res = (
db(
(db.course_instructor.course == course)
& (db.course_instructor.instructor == instructor)
).count(**COUNT_CACHE)
> 0
)
return res
def is_editor(userid):
ed = db(db.auth_group.role == "editor").select(db.auth_group.id).first()
row = (
db((db.auth_membership.user_id == userid) & (db.auth_membership.group_id == ed))
.select()
.first()
)
if row:
return True
else:
return False
class IS_COURSE_ID:
"""used to validate that a course name entered (e.g. devcourse) corresponds to a
valid course ID (i.e. db.courses.id)"""
def __init__(
self, error_message="Unknown course name. Please see your instructor."
):
self.e = error_message
def __call__(self, value):
if db(db.courses.course_name == value).select():
return (db(db.courses.course_name == value).select()[0].id, None)
return (value, self.e)
# Do not allow any of the reserved CSS characters in a username.
class HAS_NO_DOTS:
def __init__(
self,
error_message=r"""Your username may not contain spaces or any other special characters: !"#$%&'()*+,./:;<=>?@[\]^`{|}~ just letters and numbers""",
):
self.e = error_message
def __call__(self, value):
match = re.search(r"""[!"#$%&'()*+,./:;<=>?@[\]^`{|}~ ]""", value)
if match:
exist = db(db.auth_user.username == value).count()
if exist > 0: # user already registered give them a pass
return (value, None)
self.e = f"""Your username may not contain a {match.group(0).replace(" ","space")} or any other special characters except - or _"""
return (value, self.e)
return (value, None)
def formatter(self, value):
return value
db.define_table(
"auth_user",
Field("username", type="string", label=T("Username")),
Field("first_name", type="string", label=T("First Name")),
Field("last_name", type="string", label=T("Last Name")),
Field(
"email",
type="string",
requires=IS_EMAIL(banned="^.*shoeonlineblog\\.com$"),
label=T("Email"),
),
Field("password", type="password", readable=False, label=T("Password")),
Field(
"created_on",
"datetime",
default=request.now,
label=T("Created On"),
writable=False,
readable=False,
),
Field(
"modified_on",
"datetime",
default=request.now,
label=T("Modified On"),
writable=False,
readable=False,
update=request.now,
),
Field("registration_key", default="", writable=False, readable=False),
Field("reset_password_key", default="", writable=False, readable=False),
Field("registration_id", default="", writable=False, readable=False),
Field(
"course_id",
"reference courses",
label=T("Course Name"),
required=True,
default=1,
),
Field(
"course_name",
compute=lambda row: getCourseNameFromId(row.course_id),
readable=False,
writable=False,
),
Field(
"accept_tcp", required=True, type="boolean", default=True, label=T("I Accept")
),
Field("active", type="boolean", writable=False, readable=False, default=True),
Field("donated", type="boolean", writable=False, readable=False, default=False),
# format='%(username)s',
format=lambda u: (u.first_name or "") + " " + (u.last_name or ""),
migrate=bookserver_owned("auth_user"),
)
db.auth_user.first_name.requires = IS_NOT_EMPTY(error_message=auth.messages.is_empty)
db.auth_user.last_name.requires = IS_NOT_EMPTY(error_message=auth.messages.is_empty)
db.auth_user.password.requires = CRYPT(key=auth.settings.hmac_key)
db.auth_user.username.requires = (
HAS_NO_DOTS(),
IS_NOT_IN_DB(db, db.auth_user.username),
)
db.auth_user.registration_id.requires = IS_NOT_IN_DB(db, db.auth_user.registration_id)
db.auth_user.email.requires = (
IS_EMAIL(error_message=auth.messages.invalid_email),
IS_NOT_IN_DB(db, db.auth_user.email),
)
db.auth_user.course_id.requires = IS_COURSE_ID()
auth.define_tables(username=True, signature=False, migrate=table_migrate_prefix + "")
# Because so many pages rely on `views/_sphinx_static_file.html` it makes
# sense to provide some default values for variables used in the template here
# The latex_preamble attribute can be used for any custom latex macros used in
# the text, that need to be available for grading, assignments, and practice
# This is used in nearly every PreTeXt book.
request.latex_preamble = ""
def set_latex_preamble(base_course: str):
# See `models/db_ebook.py` for course_attributes table
bc = db(db.courses.course_name == base_course).select().first()
res = (
db(
(db.course_attributes.course_id == bc.id)
& (db.course_attributes.attr == "latex_macros")
)
.select()
.first()
)
request.latex_preamble = res.value if res else ""
## configure email
mail = auth.settings.mailer
mail.settings.server = settings.email_server
mail.settings.sender = settings.email_sender
mail.settings.login = settings.email_login
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
auth.settings.register_next = URL("default", "index")
# change default session login time from 1 hour to 24 hours
auth.settings.expiration = 3600 * 24
janrain_url = "http://%s/%s/default/user/login" % (
request.env.http_host,
request.application,
)
db.define_table(
"user_courses",
Field("user_id", db.auth_user, ondelete="CASCADE"),
Field("course_id", db.courses, ondelete="CASCADE"),
Field("user_id", db.auth_user),
Field("course_id", db.courses),
migrate=bookserver_owned("user_courses"),
)
# For whatever reason the automatic migration of this table failed. Need the following manual statements
# alter table user_courses alter column user_id type integer using user_id::integer;
# alter table user_courses alter column course_id type integer using course_id::integer;
#########################################################################
## Define your tables below (or better in another model file) for example
##
## >>> db.define_table('mytable',Field('myfield','string'))
##
## Fields can be 'string','text','password','integer','double','boolean'
## 'date','time','datetime','blob','upload', 'reference TABLENAME'
## There is an implicit 'id integer autoincrement' field
## Consult manual for more options, validators, etc.
##
## More API examples for controllers:
##
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
## >>> for row in rows: print(row.id, row.myfield)
#########################################################################
# mail.settings.server = settings.email_server
# mail.settings.sender = settings.email_sender
# mail.settings.login = settings.email_login
auth.messages.retrieve_username_subject = "Runestone Academy username"
auth.messages.reset_password_subject = "Runestone Academy password"
auth.messages.retrieve_username = """<html>
Hello,
<br>
<p>We received your request to retrieve your username. According to our files
Your username is: %(username)s </p>
<p>If you have any trouble with this automated system you can also ask your instructor
and they can help you retrieve your username or reset your password. If you are
an instructor, you can (as a last resort) contact Runestone by creating an issue
on <a href="https://github.com/RunestoneInteractive/RunestoneServer/issues">Github</a>.</p>
<p>This message was generated automatically and comes from an unmonitored email address. If you reply to this message a human will not see it. Use the github link above if you need help from a real person.</p>
Thanks for using Runestone!<br><br>
Brad Miller
</html>
"""
auth.messages.reset_password = """<html>
Hello, <br>
<p>If you click on <a href="%(link)s">this link</a> you will reset your password. Sometimes schools have software that tries to sanitize the previous link and makes it useless.</p>
<p>If you get a 404 try changing the http to https in the link provided.</p>
<p>If you have any trouble with the link you can also ask your instructor
and they can help you retrieve your username or reset your password. If you are
an instructor, you can (as a last resort) contact Runestone by creating an issue
on <a href="https://github.com/RunestoneInteractive/RunestoneServer/issues">Github</a>.</p>
<p>This message was generated automatically and comes from an unmonitored email address. If you reply to this message a human will not see it. Use the github link above if you need help from a real person.</p>
Thanks for using Runestone!<br><br>
Brad Miller
</html>
"""
# Make sure the latest version of admin is always loaded.
adminjs = os.path.join("applications", request.application, "static", "js", "admin.js")
try:
mtime = int(os.path.getmtime(adminjs))
except FileNotFoundError:
mtime = random.randrange(10000)
request.admin_mtime = str(mtime)
# response.headers["Access-Control-Allow-Origin"] = "*"
def check_for_donate_or_build(field_dict, id_of_insert):
if "donate" in request.vars:
session.donate = request.vars.donate
if "ccn_checkbox" in request.vars:
session.build_course = True
if "auth_user" in db:
db.auth_user._after_insert.append(check_for_donate_or_build)
def admin_logger(logger):
if settings.academy_mode:
if auth.user:
sid = auth.user.username
course = auth.user.course_name
else:
sid = "Anonymous"
course = "boguscourse"
try:
db.useinfo.insert(
sid=sid,
act=request.function,
div_id=request.env.query_string or "no params",
event=request.controller,
timestamp=datetime.datetime.utcnow(),
course_id=course,
)
except Exception as e:
logger.error(
f"failed to insert log record for {request.controller} {request.function}: {e}"
)
db.rollback()
def createUser(username, password, fname, lname, email, course_name, instructor=False):
cinfo = db(db.courses.course_name == course_name).select().first()
if not cinfo:
raise ValueError("Course {} does not exist".format(course_name))
pw = CRYPT(auth.settings.hmac_key)(password)[0]
uid = db.auth_user.insert(
username=username,
password=pw,
first_name=fname,
last_name=lname,
email=email,
course_id=cinfo.id,
course_name=course_name,
active="T",
created_on=datetime.datetime.now(),
)
db.user_courses.insert(user_id=uid, course_id=cinfo.id)
if instructor:
irole = db(db.auth_group.role == "instructor").select(db.auth_group.id).first()
db.auth_membership.insert(user_id=uid, group_id=irole)
db.course_instructor.insert(course=cinfo.id, instructor=uid)
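# Hedged usage sketch, not part of the original model code: the course name and
# credentials below are invented for illustration only. Wrapped in a function so
# that nothing executes when this model file is loaded.
def _example_create_student_account():
    # Assumes a course named "example101" already exists in db.courses.
    createUser(
        username="jdoe",
        password="s3cret",
        fname="Jane",
        lname="Doe",
        email="jdoe@example.com",
        course_name="example101",
        instructor=False,
    )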
def _validateUser(username, password, fname, lname, email, course_name, line):
errors = []
if auth.user.course_name != course_name:
errors.append(f"Course name does not match your course on line {line}")
cinfo = db(db.courses.course_name == course_name).select().first()
if not cinfo:
errors.append(f"Course {course_name} does not exist on line {line}")
match = re.search(r"""[!"#$%&'()*+,./:;<=>?@[\]^`{|}~ ]""", username)
if match:
errors.append(
f"""Username cannot contain a {match.group(0).replace(" ", "space")} on line {line}"""
)
uinfo = db(db.auth_user.username == username).count()
if uinfo > 0:
errors.append(f"Username {username} already exists on line {line}")
if password == "":
errors.append(f"password cannot be blank on line {line}")
if "@" not in email:
errors.append(f"Email address missing @ on line {line}")
return errors
def create_rs_token():
d = dict(sub=auth.user.username)
expires = datetime.timedelta(days=30)
_create_access_token(d, expires=expires)
# This function is basically copied from the fastapi_login plugin
# see `their github repo <https://github.com/MushroomMaula/fastapi_login>`_
#
def _create_access_token(data: dict, expires=None, scopes=None) -> bytes:
"""
Helper function to create the encoded access token using
the provided secret and the algorithm of the LoginManager instance
Args:
data (dict): The data which should be stored in the token
expires (datetime.timedelta): An optional timedelta in which the token expires.
Defaults to 15 minutes
scopes (Collection): Optional scopes the token user has access to.
Returns:
The encoded JWT with the data and the expiry. The expiry is
available under the 'exp' key
"""
to_encode = data.copy()
if expires:
expires_in = datetime.datetime.utcnow() + expires
else:
        # default to a 15 minute expiry time
expires_in = datetime.datetime.utcnow() + datetime.timedelta(minutes=15)
to_encode.update({"exp": expires_in})
if scopes is not None:
unique_scopes = set(scopes)
to_encode.update({"scopes": list(unique_scopes)})
algorithm = "HS256" # normally set in constructor
# the secret key value should be set in 1.py as part of the
# web2py installation.
jwt_secret = settings.jwt_secret
try:
encoded_jwt = jwt.encode(to_encode, jwt_secret, algorithm)
except Exception as e:
logger.error(f"Failed to create a JWT Token for {to_encode}: {e}")
if not jwt_secret:
logger.error("Please set a secret key value in models/1.py")
encoded_jwt = None
# Added Runestone-only code: set cookie
if encoded_jwt:
response.cookies["access_token"] = encoded_jwt
response.cookies["access_token"]["expires"] = 24 * 3600 * 30
response.cookies["access_token"]["path"] = "/"
# decode here decodes the byte str to a normal str not the token
return encoded_jwt
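# Hedged sketch, not in the original source: round-tripping a token produced by
# _create_access_token with the same secret, to show what the payload contains.
# Uses only standard PyJWT calls (jwt.decode with an explicit algorithms list).
def _example_decode_rs_token(encoded_jwt):
    payload = jwt.decode(encoded_jwt, settings.jwt_secret, algorithms=["HS256"])
    # 'sub' holds the username and 'exp' the expiry set above
    return payload["sub"], payload["exp"]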
|
py | b409dcd3fab7e540bfd2ae6f21eeab7b6240c07e | # -*- coding: utf-8 -*-
'''Live CSS Reloading for PySide'''
from Qt import QtCore
from collections import defaultdict
class LiveStyle(QtCore.QFileSystemWatcher):
'''Updates a widgets style when its css changes.
Simple usage:
w = QtGui.QWidget()
LiveStyle(path='path/to/style.css', parent=w)
Multiple widgets and stylesheets:
w = QtGui.QMainWindow()
d = QtGui.QDialog(parent=w)
live = LiveStyle(parent=w)
live.link(w, 'path/to/windowstyle.css')
live.link(d, 'path/to/dialogstyle.css')
'''
def __init__(self, path=None, parent=None):
super(LiveStyle, self).__init__(parent)
self.fileChanged.connect(self.css_changed)
self.path_mapping = defaultdict(set)
if path and parent:
self.link(parent, path)
def __repr__(self):
return '<{}>(parent={})'.format(self.__class__.__name__, self.parent())
def link(self, widget, path):
'''Links a widget to a stylesheet path.
Arguments:
widget: QtGui.QWidget instance
path: Filepath to stylesheet
'''
self.path_mapping[path].add(widget)
self.addPath(path)
def unlink(self, widget, path):
'''Unlinks a widget from a stylesheet path.
Arguments:
widget: QtGui.QWidget instance
path: Filepath to stylesheet
'''
if not self.path_mapping[path]:
return
self.path_mapping[path].discard(widget)
if not self.path_mapping[path]:
self.path_mapping.pop(path)
self.removePath(path)
def css_changed(self, path):
'''Updates all widget linked to the changed filepath.'''
widgets = self.path_mapping[path]
with open(path) as f:
style = f.read()
for widget in widgets:
widget.style().unpolish(widget)
widget.setStyleSheet(style)
widget.style().polish(widget)
|
py | b409dd5d4da7545bf5737ebf689a09d73469d401 | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import billforward
from billforward.rest import ApiException
from billforward.models.resume_subscription_amendment import ResumeSubscriptionAmendment
class TestResumeSubscriptionAmendment(unittest.TestCase):
""" ResumeSubscriptionAmendment unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testResumeSubscriptionAmendment(self):
"""
Test ResumeSubscriptionAmendment
"""
model = billforward.models.resume_subscription_amendment.ResumeSubscriptionAmendment()
if __name__ == '__main__':
unittest.main()
|
py | b409dda0f656bd9dbc0bebecace1ead59222065a | # Copyright 2017-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <[email protected]>, 2017
# - Mario Lassnig <[email protected]>, 2018-2019
#
# PY3K COMPATIBLE
from jsonschema import validate, ValidationError
from rucio.common.exception import InvalidObject
ACCOUNT = {"description": "Account name",
"type": "string",
"pattern": "^[a-z0-9-_]{1,30}$"}
ACCOUNTS = {"description": "Array of accounts",
"type": "array",
"items": ACCOUNT,
"minItems": 0,
"maxItems": 1000}
ACCOUNT_TYPE = {"description": "Account type",
"type": "string",
"enum": ["USER", "GROUP", "SERVICE"]}
ACTIVITY = {"description": "Activity name",
"type": "string",
"enum": ["Data Brokering", "Data Consolidation", "Data rebalancing",
"Debug", "Express", "Functional Test", "Functional Test XrootD",
"Functional Test WebDAV", "Group Subscriptions",
"Production Input", "Production Output",
"Analysis Input", "Analysis Output", "Staging",
"T0 Export", "T0 Tape", "Upload/Download (Job)",
"Upload/Download (User)", "User Subscriptions"]}
SCOPE_LENGTH = 25
SCOPE = {"description": "Scope name",
"type": "string",
"pattern": "^[a-zA-Z_\\-.0-9]{1,%s}$" % SCOPE_LENGTH}
R_SCOPE = {"description": "Scope name",
"type": "string",
"pattern": "\\w"}
NAME_LENGTH = 250
NAME = {"description": "Data Identifier name",
"type": "string",
"pattern": "^[A-Za-z0-9][A-Za-z0-9\\.\\-\\_]{1,%s}$" % NAME_LENGTH}
R_NAME = {"description": "Data Identifier name",
"type": "string",
"pattern": "\\w"}
LOCKED = {"description": "Rule locked status",
"type": ["boolean", "null"]}
ASK_APPROVAL = {"description": "Rule approval request",
"type": ["boolean", "null"]}
ASYNCHRONOUS = {"description": "Asynchronous rule creation",
"type": ["boolean", "null"]}
PURGE_REPLICAS = {"description": "Rule purge replica status",
"type": "boolean"}
IGNORE_AVAILABILITY = {"description": "Rule ignore availability status",
"type": "boolean"}
RSE = {"description": "RSE name",
"type": "string",
"pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"}
RSE_ATTRIBUTE = {"description": "RSE attribute",
"type": "string",
"pattern": r'([A-Za-z0-9\._-]+[=<>][A-Za-z0-9_-]+)'}
DEFAULT_RSE_ATTRIBUTE = {"description": "Default RSE attribute",
"type": "string",
"pattern": r'([A-Z0-9]+([_-][A-Z0-9]+)*)'}
REPLICA_STATE = {"description": "Replica state",
"type": "string",
"enum": ["AVAILABLE", "UNAVAILABLE", "COPYING", "BEING_DELETED", "BAD", "SOURCE", "A", "U", "C", "B", "D", "S"]}
DATE = {"description": "Date",
"type": "string",
"pattern": r'((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun))[,]\s\d{2}\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s(0\d|1\d|2[0-3])(\:)(0\d|1\d|2\d|3\d|4\d|5\d)(\:)(0\d|1\d|2\d|3\d|4\d|5\d)\s(UTC)'}
DID_TYPE = {"description": "DID type",
"type": "string",
"enum": ["DATASET", "CONTAINER", "FILE", "F"]}
GROUPING = {"description": "Rule grouping",
"type": ["string", "null"],
"enum": ["DATASET", "NONE", "ALL", None]}
NOTIFY = {"description": "Rule notification setting",
"type": ["string", "null"],
"enum": ["Y", "C", "N", "P", None]}
COMMENT = {"description": "Rule comment",
"type": ["string", "null"],
"maxLength": 250}
METADATA = {"description": "Rule wfms metadata",
"type": ["string", "null"],
"maxLength": 3999}
BYTES = {"description": "Size in bytes",
"type": "integer"}
ADLER32 = {"description": "adler32",
"type": "string",
"pattern": "^[a-fA-F\\d]{8}$"}
WEIGHT = {"description": "Rule weight",
"type": ["string", "null"]}
MD5 = {"description": "md5",
"type": "string",
"pattern": "^[a-fA-F\\d]{32}$"}
UUID = {"description": "Universally Unique Identifier (UUID)",
"type": "string",
"pattern": '^(\\{){0,1}[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\\}){0,1}$'}
META = {"description": "Data Identifier(DID) metadata",
"type": "object",
"properties": {"guid": UUID},
"additionalProperties": True}
PFN = {"description": "Physical File Name", "type": "string"}
COPIES = {"description": "Number of replica copies", "type": "integer"}
RSE_EXPRESSION = {"description": "RSE expression", "type": "string"}
SOURCE_REPLICA_EXPRESSION = {"description": "RSE expression", "type": ["string", "null"]}
LIFETIME = {"description": "Lifetime", "type": "number"}
RULE_LIFETIME = {"description": "Rule lifetime", "type": ["number", "null"]}
SUBSCRIPTION_ID = {"description": "Rule Subscription id", "type": ["string", "null"]}
PRIORITY = {"description": "Priority of the transfers",
"type": "integer"}
SPLIT_CONTAINER = {"description": "Rule split container mode",
"type": ["boolean", "null"]}
RULE = {"description": "Replication rule",
"type": "object",
"properties": {"dids": {"type": "array"},
"account": ACCOUNT,
"copies": COPIES,
"rse_expression": RSE_EXPRESSION,
"grouping": GROUPING,
"weight": WEIGHT,
"lifetime": RULE_LIFETIME,
"locked": LOCKED,
"subscription_id": SUBSCRIPTION_ID,
"source_replica_expression": SOURCE_REPLICA_EXPRESSION,
"activity": ACTIVITY,
"notify": NOTIFY,
"purge_replicas": PURGE_REPLICAS,
"ignore_availability": IGNORE_AVAILABILITY,
"comment": COMMENT,
"ask_approval": ASK_APPROVAL,
"asynchronous": ASYNCHRONOUS,
"priority": PRIORITY,
'split_container': SPLIT_CONTAINER,
'meta': METADATA},
"required": ["dids", "copies", "rse_expression"],
"additionalProperties": False}
RULES = {"description": "Array of replication rules",
"type": "array",
"items": RULE,
"minItems": 1,
"maxItems": 1000}
COLLECTION_TYPE = {"description": "Dataset or container type",
"type": "string",
"enum": ["DATASET", "CONTAINER"]}
COLLECTION = {"description": "Dataset or container",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"type": COLLECTION_TYPE,
"meta": META,
"rules": RULES},
"required": ["scope", "name", "type"],
"additionalProperties": False}
COLLECTIONS = {"description": "Array of datasets or containers",
"type": "array",
"items": COLLECTION,
"minItems": 1,
"maxItems": 1000}
DID = {"description": "Data Identifier(DID)",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"type": DID_TYPE,
"meta": META,
"rules": RULES,
"bytes": BYTES,
"adler32": ADLER32,
"md5": MD5,
"state": REPLICA_STATE,
"pfn": PFN},
"required": ["scope", "name"],
"additionalProperties": False}
DID_FILTERS = {"description": "Filters dictionary to list DIDs",
"type": "object",
"properties": {"created_before": DATE,
"created_afted": DATE},
"additionalProperties": True}
R_DID = {"description": "Data Identifier(DID)",
"type": "object",
"properties": {"scope": R_SCOPE,
"name": R_NAME,
"type": DID_TYPE,
"meta": META,
"rules": RULES,
"bytes": BYTES,
"adler32": ADLER32,
"md5": MD5,
"state": REPLICA_STATE,
"pfn": PFN},
"required": ["scope", "name"],
"additionalProperties": False}
DIDS = {"description": "Array of Data Identifiers(DIDs)",
"type": "array",
"items": DID,
"minItems": 1,
"maxItems": 1000}
R_DIDS = {"description": "Array of Data Identifiers(DIDs)",
"type": "array",
"items": R_DID,
"minItems": 1,
"maxItems": 1000}
ATTACHMENT = {"description": "Attachement",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"rse": {"description": "RSE name",
"type": ["string", "null"],
"pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"},
"dids": DIDS},
"required": ["dids"],
"additionalProperties": False}
ATTACHMENTS = {"description": "Array of attachments",
"type": "array",
"items": ATTACHMENT,
"minItems": 1,
"maxItems": 1000}
SUBSCRIPTION_FILTER = {"type": "object",
"properties": {"datatype": {"type": "array"},
"prod_step": {"type": "array"},
"stream_name": {"type": "array"},
"project": {"type": "array"},
"scope": {"type": "array"},
"pattern": {"type": "string"},
"excluded_pattern": {"type": "string"},
"group": {"type": "string"},
"provenance": {"type": "string"},
"account": ACCOUNTS,
"grouping": {"type": "string"},
"split_rule": {"type": "boolean"}}}
ADD_REPLICA_FILE = {"description": "add replica file",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"bytes": BYTES,
"adler32": ADLER32},
"required": ["scope", "name", "bytes", "adler32"]}
ADD_REPLICA_FILES = {"description": "add replica files",
"type": "array",
"items": ADD_REPLICA_FILE,
"minItems": 1,
"maxItems": 1000}
CACHE_ADD_REPLICAS = {"description": "rucio cache add replicas",
"type": "object",
"properties": {"files": ADD_REPLICA_FILES,
"rse": RSE,
"lifetime": LIFETIME,
"operation": {"enum": ["add_replicas"]}},
"required": ['files', 'rse', 'lifetime', 'operation']}
DELETE_REPLICA_FILE = {"description": "delete replica file",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME},
"required": ["scope", "name"]}
DELETE_REPLICA_FILES = {"description": "delete replica files",
"type": "array",
"items": DELETE_REPLICA_FILE,
"minItems": 1,
"maxItems": 1000}
CACHE_DELETE_REPLICAS = {"description": "rucio cache delete replicas",
"type": "object",
"properties": {"files": DELETE_REPLICA_FILES,
"rse": RSE,
"operation": {"enum": ["delete_replicas"]}},
"required": ['files', 'rse', 'operation']}
MESSAGE_OPERATION = {"type": "object",
"properties": {'operation': {"enum": ["add_replicas", "delete_replicas"]}}}
ACCOUNT_ATTRIBUTE = {"description": "Account attribute",
"type": "string",
"pattern": r'^[a-z0-9-_]{1,30}$'}
SCOPE_NAME_REGEXP = '/(.*)/(.*)'
RSES = {"description": "list of RSEs",
"type": "array",
"items": {
"type": "object",
"properties": {
"rse": RSE
},
"required": ["rse"],
"additionalProperties": True}}
DISTANCE = {"description": "RSE distance",
"type": "object",
"properties": {
"src_rse_id": {"type": "string"},
"dest_rse_id": {"type": "string"},
"ranking": {"type": "integer"}
},
"required": ["src_rse_id", "dest_rse_id", "ranking"],
"additionalProperties": True}
IMPORT = {"description": "import data into rucio.",
"type": "object",
"properties": {
"rses": RSES,
"distances": {
"type": "object"
}
}}
SCHEMAS = {'account': ACCOUNT,
'account_type': ACCOUNT_TYPE,
'activity': ACTIVITY,
'name': NAME,
'r_name': R_NAME,
'rse': RSE,
'rse_attribute': RSE_ATTRIBUTE,
'scope': SCOPE,
'r_scope': R_SCOPE,
'did': DID,
'did_filters': DID_FILTERS,
'r_did': R_DID,
'dids': DIDS,
'rule': RULE,
'r_dids': R_DIDS,
'collection': COLLECTION,
'collections': COLLECTIONS,
'attachment': ATTACHMENT,
'attachments': ATTACHMENTS,
'subscription_filter': SUBSCRIPTION_FILTER,
'cache_add_replicas': CACHE_ADD_REPLICAS,
'cache_delete_replicas': CACHE_DELETE_REPLICAS,
'account_attribute': ACCOUNT_ATTRIBUTE,
'import': IMPORT}
def validate_schema(name, obj):
"""
Validate object against json schema
:param name: The json schema name.
:param obj: The object to validate.
"""
try:
if obj:
validate(obj, SCHEMAS.get(name, {}))
except ValidationError as error: # NOQA, pylint: disable=W0612
raise InvalidObject("Problem validating %(name)s : %(error)s" % locals())
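# Hedged usage sketch, not part of the original module: validating a minimal DID
# dictionary against the 'did' schema defined above. The scope and name values
# are invented and only need to satisfy the SCOPE and NAME patterns.
def _example_validate_did():
    did = {"scope": "user.jdoe",
           "name": "file.0001",
           "bytes": 1024,
           "adler32": "0a1b2c3d"}
    # Raises InvalidObject if the dictionary does not conform to the schema.
    validate_schema("did", did)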
|
py | b409ddaed842bc4200b5483ed5bd27f4cfef5db8 | from django.core.management.base import NoArgsCommand
from django_extensions.management.utils import get_project_root
from optparse import make_option
from os.path import join as _j
import os
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--optimize', '-o', '-O', action='store_true', dest='optimize',
help='Remove optimized python bytecode files'),
make_option('--path', '-p', action='store', dest='path',
help='Specify path to recurse into'),
)
help = "Removes all python bytecode compiled files from the project."
requires_model_validation = False
def handle_noargs(self, **options):
project_root = options.get("path", None)
if not project_root:
project_root = get_project_root()
        exts = [".pyc", ".pyo"] if options.get("optimize", False) else [".pyc"]
verbose = int(options.get("verbosity", 1))
if verbose > 1:
print("Project Root: %s" % project_root)
for root, dirs, files in os.walk(project_root):
for file in files:
ext = os.path.splitext(file)[1]
if ext in exts:
full_path = _j(root, file)
if verbose > 1:
print(full_path)
os.remove(full_path)
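# Hedged usage note, not part of the original command: with django-extensions on
# an older Django release that still ships NoArgsCommand, this would typically be
# run through manage.py, e.g.
#
#   python manage.py clean_pyc --optimize --path=/path/to/project --verbosity=2
#
# The project path above is a placeholder.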
|
py | b409df9ac77b846e1992488997a9b284fda663d8 | from __future__ import print_function
from itertools import combinations
import networkx as nx
import pandas as pd
import argparse
from os import path
#
# Usage example:
#
# $ python3 tf_change_explanations.py -p 0.01 -d some_directory/Untreated_vs_Treatment_differential_md_scores.txt -o untreated_vs_treated_explanation.txt
#
basedir = path.split(__file__)[0]
KNOWLEDGE_GRAPH = "%s/public_knowledge/prot_reactome_interactions.pkl" % basedir
TF_UNIPROT_MAP = "%s/public_knowledge/human_TFs_to_uniprot.txt" % basedir
TF_HOCOMOCO_UNIPROT_MAP = "%s/public_knowledge/HOCOMOCOv11_to_uniprot.txt" % basedir
ONTO_LABELS = "%s/public_knowledge/all_labels.tsv" % basedir
PATH_SEARCH_DEPTH = 2
EXTRA_NODES = []
EXTRA_NODES_LABEL = []
UNINTERESTING_RELATIONS = []
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--p-value', dest='p_val', help="P-value cutoff to determine which TFs to include from DAStk's output (default=0.05).", required=False, default=0.05)
parser.add_argument('-d', '--dastk-results', dest='dastk_results', help="Results file from DAStk (*differential_md_scores.txt) used to find relations between the most significant TF changes in activity.", required=True)
parser.add_argument('-o', '--output', dest='output_filename', help='Output filename for the report.', required=True)
parser.add_argument('-u', '--uninteresting-nodes', dest='uninteresting_nodes', help="File listing ontology concept URIs to ignore during the pathway searches, because they are uninformative for TFs (e.g.\"binds to DNA\") or they don't apply to the current study, just to minimize noise. One URI per line, can optionally add a description after a TAB to track the label of the ignored intersecting concepts. See the example file for a format guide.", required=False)
parser.add_argument('-e', '--extra-concepts', dest='extra_concepts', help="File listing extra ontology concepts to include in the pathway searches, that are relevant to this study. This is a two-column (TAB-separated) list, first the ontology URI and second a label you'd like to use in the report.", required=False)
args = parser.parse_args()
G = nx.read_gpickle(KNOWLEDGE_GRAPH)
if args.extra_concepts:
with open(args.extra_concepts, 'r') as fd:
for line in fd.readlines():
if len(line) > 0:
chunks = line.split('\t')
EXTRA_NODES.append(chunks[0])
EXTRA_NODES_LABEL.append(chunks[1][:-1])
if args.uninteresting_nodes:
with open(args.uninteresting_nodes, 'r') as fd:
for line in fd.readlines():
if len(line) > 0:
chunks = line.split('\t')
UNINTERESTING_RELATIONS.append(chunks[0])
# cache the Uniprot mappings
uniprot_tf_ids = dict()
for mapping_file in [TF_UNIPROT_MAP, TF_HOCOMOCO_UNIPROT_MAP]:
with open(mapping_file, 'r') as fd:
for line in fd.readlines():
chunks = line.split('\t')
uniprot_tf_ids[chunks[0]] = chunks[1]
# Gather the list of significantly changing TFs
significant_tfs = []
significant_tfs_key = []
for extra_node in EXTRA_NODES:
significant_tfs.append(extra_node)
for extra_key in EXTRA_NODES_LABEL:
significant_tfs_key.append(extra_key)
with open(args.dastk_results, 'r') as fd:
for line in fd.readlines():
chunks = line.split('\t')
tf_name = chunks[0].split('_')[0]
p_value = float(chunks[1])
if p_value < float(args.p_val) and tf_name not in significant_tfs_key:
significant_tfs.append("<http://purl.obolibrary.org/obo/PR_%s>" % uniprot_tf_ids[tf_name])
significant_tfs_key.append(tf_name)
labels_df = pd.read_csv(ONTO_LABELS, sep="\t", na_filter=False, names=['concept', 'label'])
idx = 0
for extra_concept in EXTRA_NODES:
labels_df.loc[labels_df.index.max() + 1] = [extra_concept, EXTRA_NODES_LABEL[idx]]
idx += 1
output_fd = open(args.output_filename, 'w')
output_fd.write("Transcription factors displaying a significant difference in activity:\n%s" % ", ".join(sorted(significant_tfs_key)))
#print(sorted(significant_tfs_key))
common_intersections = dict()
rel_list = dict()
rel_list[1000] = []
for pair in combinations(significant_tfs, 2):
#print(pair)
try:
for path in nx.all_simple_paths(G, source=pair[0], target=pair[1], cutoff=PATH_SEARCH_DEPTH):
idx_1 = significant_tfs.index(pair[0])
idx_2 = significant_tfs.index(pair[1])
if len(path) == 2:
relation_concept = G.edges[pair[0], pair[1]]['label']
relation = labels_df[(labels_df.concept==relation_concept)].label.values[0]
if 0 in rel_list:
rel_list[0].append("%s %s %s\n" % (significant_tfs_key[idx_1], relation, significant_tfs_key[idx_2]))
else:
rel_list[0] = ["%s %s %s\n" % (significant_tfs_key[idx_1], relation, significant_tfs_key[idx_2])]
elif len(path) == 3:
if path[1] in UNINTERESTING_RELATIONS:
continue
relation_1 = G.edges[path[0], path[1]]['label']
relation_1_label = labels_df[(labels_df.concept==relation_1)].label.values[0]
relation_2 = G.edges[path[1], path[2]]['label']
relation_2_label = labels_df[(labels_df.concept==relation_2)].label.values[0]
intersecting_concept = labels_df[(labels_df.concept==path[1])].label.values[0]
if intersecting_concept in common_intersections:
if relation_1_label in common_intersections[intersecting_concept]:
common_intersections[intersecting_concept][relation_1_label].append(significant_tfs_key[idx_1])
else:
common_intersections[intersecting_concept][relation_1_label] = [significant_tfs_key[idx_1]]
if relation_2_label in common_intersections[intersecting_concept]:
common_intersections[intersecting_concept][relation_2_label].append(significant_tfs_key[idx_2])
else:
common_intersections[intersecting_concept][relation_2_label] = [significant_tfs_key[idx_2]]
else:
common_intersections[intersecting_concept] = dict()
common_intersections[intersecting_concept][relation_1_label] = [significant_tfs_key[idx_1]]
if relation_1 == relation_2:
common_intersections[intersecting_concept][relation_1_label].append(significant_tfs_key[idx_2])
else:
common_intersections[intersecting_concept][relation_2_label] = [significant_tfs_key[idx_2]]
else:
if any([x in path for x in UNINTERESTING_RELATIONS]):
continue
last_node = None
path_desc = ''
for i in range(len(path) - 1):
if last_node:
node1 = last_node
path_desc += ", and "
else:
node1 = labels_df[(labels_df.concept==path[i])].label.values[0]
node2 = labels_df[(labels_df.concept==path[i+1])].label.values[0]
rel_concept = G.edges[path[i], path[i+1]]['label']
rel = labels_df[(labels_df.concept==rel_concept)].label.values[0]
if i == len(path) - 2:
path_desc += "%s %s %s" % (node2, rel, node1)
else:
path_desc += "%s %s %s" % (node1, rel, node2)
last_node = node2
rel_list[1000].append(path_desc + '\n')
except nx.exception.NetworkXNoPath as e:
#print("No paths")
pass
except nx.exception.NodeNotFound as e:
#print("\nWarning: No node for %s or %s" % (pair[0], pair[1]))
pass
except IndexError as e:
print("\nWarning: Problem adding this path (likely missing label, or deprecated ontology concept):")
print(path)
for intersection in common_intersections.keys():
for relation in common_intersections[intersection].keys():
tfs_involved = sorted(list(set(common_intersections[intersection][relation])))
if len(tfs_involved) == 1:
explanation = "%s %s %s\n\n" % (tfs_involved[0], relation, intersection)
elif len(tfs_involved) == 2:
explanation = "%s and %s: %s %s\n\n" % (", ".join(tfs_involved[:-1]), tfs_involved[-1], relation, intersection)
else:
explanation = "%s, and %s: %s %s\n\n" % (", ".join(tfs_involved[:-1]), tfs_involved[-1], relation, intersection)
if len(tfs_involved) in rel_list:
rel_list[len(tfs_involved)].append(explanation)
else:
rel_list[len(tfs_involved)] = [explanation]
# Output results
output_fd.write("\n\nHere's what we know about these TFs presenting significant activity changes (p=%.2E):\n-----------------------------------------------------------------------------------------\n" % float(args.p_val))
if 0 in rel_list:
output_fd.write("\nDirect interactions between each of these TFs:\n--------------------------------\n")
for line in rel_list[0]:
output_fd.write(line)
output_fd.write("\nOther ways these TFs are related:\n---------------------------------\n")
for tf_count in sorted(rel_list.keys(), reverse=True):
if tf_count > 0:
for line in rel_list[tf_count]:
output_fd.write(line)
output_fd.close()
if __name__=='__main__':
main()
|
py | b409dfe6aa7a591af6437dbc7341de2da634cabc | from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import six
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlbag import Base as SqlxBase
from sqlbag import S, metadata_from_session, temporary_database
Base = declarative_base(cls=SqlxBase)
class Something(Base):
id = Column(Integer, primary_key=True)
name = Column(String)
def test_orm_stuff():
with temporary_database() as url:
with S(url) as s:
Base.metadata.create_all(s.bind.engine)
with S(url) as s:
x = Something(name="kanye")
s.add(x)
s.commit()
things = s.query(Something).all()
x1 = things[0]
prefix = "u" if six.PY2 else ""
repr_str = "Something(id=1, name={}'kanye')".format(prefix)
assert repr(x1) == str(x1) == repr_str
assert metadata_from_session(s).schema == Base.metadata.schema
assert x1._sqlachanges == {}
assert x1._ordereddict == OrderedDict([("id", 1), ("name", "kanye")])
x1.name = "kanye west"
assert x1._sqlachanges == {"name": ["kanye", "kanye west"]}
s.commit()
assert x1._sqlachanges == {}
|
py | b409e003eb7cc099da94ffebd80fe7af5dc7eec3 | import os
from pathlib import Path
import numpy as np
from scipy.io import loadmat
from dateutil.parser import parse as dateparse
import pandas as pd
from pynwb import NWBFile, NWBHDF5IO
from pynwb.file import Subject
from pynwb.behavior import SpatialSeries, Position
from pynwb.ecephys import ElectricalSeries
from hdmf.backends.hdf5.h5_utils import H5DataIO
from to_nwb import neuroscope as ns
WRITE_ALL_LFPS = False
# get sessionInfo
fpath = '/Users/bendichter/dev/buzcode/exampleDataStructs/fbasename'
fpath_base, fname = os.path.split(fpath)
session_info_matin = loadmat(
'/Users/bendichter/dev/buzcode/exampleDataStructs/20170505_396um_0um_merge.sessionInfo.mat',
struct_as_record=True)
date_text = session_info_matin['sessionInfo']['Date'][0][0][0]
animal_info_matin = loadmat(
os.path.join(fpath_base, fname + '.animalMetaData.mat'),
struct_as_record=True)
keys = ('ID', 'strain', 'species', 'surgeryDate')
animal_info = {key: animal_info_matin['animal'][key][0][0][0] for key in keys}
session_start_time = dateparse(date_text, yearfirst=True)
subject = Subject(subject_id=animal_info['ID'],
strain=animal_info['strain'],
species=animal_info['species'])
if 'DOB' in animal_info and type(animal_info['DOB']) is not str:
subject.age = str(session_start_time - animal_info['DOB'])
nwbfile = NWBFile(session_description='mouse in open exploration and theta maze', identifier=fname,
session_start_time=session_start_time,
institution='NYU', lab='Buzsaki', subject=subject)
all_ts = []
xml_filepath = os.path.join(fpath_base, fname + '.xml')
print(xml_filepath)
channel_groups = ns.get_channel_groups(xml_filepath)
shank_channels = ns.get_shank_channels(xml_filepath)
nshanks = len(shank_channels)
all_shank_channels = np.concatenate(shank_channels)
nchannels = sum(len(x) for x in channel_groups)
lfp_fs = ns.get_lfp_sampling_rate(xml_filepath)
lfp_channel = 0 # value taken from Yuta's spreadsheet
my_whl_file = os.path.join(fpath_base, fname + '.whl')
my_behavior_file = os.path.join(fpath_base, fname + '.sessionInfo.mat')
if os.path.isfile(my_whl_file):
pos_df = ns.add_position_data(nwbfile, fname)
elif os.path.isfile(my_behavior_file):
bI = loadmat(os.path.join(fpath_base, fname + '.behavior.mat'), struct_as_record=True)
# date_text = np.array2string(bI['behavior']['position'][0][0])
# date_text = date_text[2:-2];
d = {'col1': [1, 2], 'col2': [3, 4]}
df = pd.DataFrame(data=d)
print('done.')
print('setting up electrodes...', end='', flush=True)
ns.write_electrode_table(nwbfile, fpath)
# shank electrodes
device = nwbfile.create_device('implant', fname + '.xml')
electrode_counter = 0
for shankn, channels in enumerate(shank_channels):
electrode_group = nwbfile.create_electrode_group(
name='shank{}'.format(shankn),
description=fname,
device=device,
location='unknown')
for channel in channels:
nwbfile.add_electrode(np.nan, np.nan, np.nan, # position?
imp=np.nan,
location='unknown',
filtering='unknown',
description='electrode {} of shank {}, channel {}'.format(
electrode_counter, shankn, channel),
group=electrode_group)
if channel == lfp_channel:
lfp_table_region = nwbfile.create_electrode_table_region(
[electrode_counter], 'lfp electrode')
electrode_counter += 1
all_table_region = nwbfile.create_electrode_table_region(
list(range(electrode_counter)), 'all electrodes')
print('done.')
# lfp
print('reading LFPs...', end='', flush=True)
my_lfp_file = Path(os.path.join(fpath_base, fname + '.lfp'))
my_eeg_file = Path(os.path.join(fpath_base, fname + '.eeg'))
lfp_file = 1
if my_lfp_file.is_file():
lfp_file = os.path.join(fpath_base, fname + '.lfp')
elif my_eeg_file.is_file():
lfp_file = os.path.join(fpath_base, fname + '.eeg')
if isinstance(lfp_file, str):
    # this needs to be rewritten to:
    # 1) pull the number of channels (here hard-coded as N = 80) from the XML
    # 2) load in chunks so you don't overwhelm the RAM
all_channels = np.fromfile(lfp_file, dtype=np.int16).reshape(-1, 80)
all_channels_lfp = all_channels[:, all_shank_channels]
print('done.')
if WRITE_ALL_LFPS:
print('making ElectricalSeries objects for LFP...', end='', flush=True)
lfp = nwbfile.add_acquisition(
ElectricalSeries('lfp',
'lfp signal for all shank electrodes',
H5DataIO(all_channels_lfp, compression='gzip'),
all_table_region,
conversion=np.nan,
starting_time=0.0,
rate=lfp_fs,
resolution=np.nan))
all_ts.append(lfp)
print('done.')
module_behavior = nwbfile.create_processing_module(name='behavior',
description='contains behavioral data')
out_fname = fname + '.nwb'
print('writing NWB file...', end='', flush=True)
with NWBHDF5IO(out_fname, mode='w') as io:
io.write(nwbfile, cache_spec=False)
print('done.')
print('testing read...', end='', flush=True)
# test read
with NWBHDF5IO(out_fname, mode='r') as io:
io.read()
print('done.')
|
py | b409e101961948c073da7ad70a129ce80e19cc01 | import numpy as np
from multiprocessing import Process, Pipe
from a2c.common.vec_env import VecEnv
def worker(remote, env_fn_wrapper):
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.action_space, env.observation_space))
elif cmd == 'get_action_meanings':
remote.send(env.unwrapped.get_action_meanings())
else:
raise NotImplementedError
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_id, env_fns):
"""
envs: list of gym environments to run in subprocesses
"""
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, CloudpickleWrapper(env_fn)))
for (work_remote, env_fn) in zip(self.work_remotes, env_fns)]
for p in self.ps:
p.start()
self.remotes[0].send(('get_spaces', None))
self.action_space, self.observation_space = self.remotes[0].recv()
self.remotes[0].send(('get_action_meanings', None))
self.action_meanings = self.remotes[0].recv()
self.env_id = env_id
def step(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
results = [remote.recv() for remote in self.remotes]
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
@property
def num_envs(self):
return len(self.remotes)
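# Hedged usage sketch, not in the original file: building a SubprocVecEnv from a
# few Atari environments. The gym import, the "PongNoFrameskip-v4" id, and the
# env.seed() call are assumptions for illustration (the worker above expects an
# env exposing unwrapped.get_action_meanings(), which Atari envs provide).
def _make_env_fn(env_id, seed):
    def _thunk():
        import gym
        env = gym.make(env_id)
        env.seed(seed)
        return env
    return _thunk

if __name__ == '__main__':
    env_id = 'PongNoFrameskip-v4'
    venv = SubprocVecEnv(env_id, [_make_env_fn(env_id, i) for i in range(4)])
    obs = venv.reset()
    actions = [venv.action_space.sample() for _ in range(venv.num_envs)]
    obs, rewards, dones, infos = venv.step(actions)
    venv.close()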
|
py | b409e12205e13daece535f61a5d2376ca117ee19 | # Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for Python.
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
import warnings
import math
from math import gcd
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma
from numpy.lib import NumpyVersion
from scipy.spatial.distance import cdist
from scipy.ndimage import _measurements
from scipy._lib._util import (check_random_state, MapWrapper,
rng_integers, float_factorial)
import scipy.special as special
from scipy import linalg
from . import distributions
from . import _mstats_basic as mstats_basic
from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
siegelslopes)
from ._stats import (_kendall_dis, _toint64, _weightedrankedtau,
_local_correlations)
from dataclasses import make_dataclass
from ._hypotests import _all_partitions
from ._hypotests_pythran import _compute_outer_prob_inside_method
from ._axis_nan_policy import (_axis_nan_policy_factory,
_broadcast_concatenate)
# Functions/classes in other files should be added in `__init__.py`, not here
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore',
'cumfreq', 'relfreq', 'obrientransform',
'sem', 'zmap', 'zscore', 'gzscore', 'iqr', 'gstd',
'median_absolute_deviation', 'median_abs_deviation',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
'f_oneway', 'F_onewayConstantInputWarning',
'F_onewayBadInputSizesWarning',
'PearsonRConstantInputWarning', 'PearsonRNearConstantInputWarning',
'pearsonr', 'fisher_exact',
'SpearmanRConstantInputWarning', 'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau', 'multiscale_graphcorr',
'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel',
'kstest', 'ks_1samp', 'ks_2samp',
'chisquare', 'power_divergence',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'rankdata',
'combine_pvalues', 'wasserstein_distance', 'energy_distance',
'brunnermunzel', 'alexandergovern']
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore', over='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# This can happen when attempting to sum things which are not
# numbers (e.g. as in the function `mode`). Try an alternative method:
try:
contains_nan = np.nan in set(a.ravel())
except TypeError:
# Don't know what to do. Fall back to omitting nan values and
# issue a warning.
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly "
"checked for nan values. nan values "
"will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return contains_nan, nan_policy
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _shape_with_dropped_axis(a, axis):
"""
Given an array `a` and an integer `axis`, return the shape
of `a` with the `axis` dimension removed.
Examples
--------
>>> a = np.zeros((3, 5, 2))
>>> _shape_with_dropped_axis(a, 1)
(3, 2)
"""
shp = list(a.shape)
try:
del shp[axis]
except IndexError:
raise np.AxisError(axis, a.ndim) from None
return tuple(shp)
def _broadcast_shapes(shape1, shape2):
"""
Given two shapes (i.e. tuples of integers), return the shape
that would result from broadcasting two arrays with the given
shapes.
Examples
--------
>>> _broadcast_shapes((2, 1), (4, 1, 3))
(4, 2, 3)
"""
d = len(shape1) - len(shape2)
if d <= 0:
shp1 = (1,)*(-d) + shape1
shp2 = shape2
else:
shp1 = shape1
shp2 = (1,)*d + shape2
shape = []
for n1, n2 in zip(shp1, shp2):
if n1 == 1:
n = n2
elif n2 == 1 or n1 == n2:
n = n1
else:
raise ValueError(f'shapes {shape1} and {shape2} could not be '
'broadcast together')
shape.append(n)
return tuple(shape)
def _broadcast_shapes_with_dropped_axis(a, b, axis):
"""
Given two arrays `a` and `b` and an integer `axis`, find the
shape of the broadcast result after dropping `axis` from the
shapes of `a` and `b`.
Examples
--------
>>> a = np.zeros((5, 2, 1))
>>> b = np.zeros((1, 9, 3))
>>> _broadcast_shapes_with_dropped_axis(a, b, 1)
(5, 3)
"""
shp1 = _shape_with_dropped_axis(a, axis)
shp2 = _shape_with_dropped_axis(b, axis)
try:
shp = _broadcast_shapes(shp1, shp2)
except ValueError:
raise ValueError(f'non-axis shapes {shp1} and {shp2} could not be '
'broadcast together') from None
return shp
# note that `weights` are paired with `x`
@_axis_nan_policy_factory(
lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
result_unpacker=lambda x: (x,), kwd_samples=['weights'])
def gmean(a, axis=0, dtype=None, weights=None):
"""Compute the geometric mean along the specified axis.
Return the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
weights : array_like, optional
The `weights` array must be broadcastable to the same shape as `a`.
Default is None, which gives each value a weight of 1.0.
Returns
-------
gmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
References
----------
.. [1] "Weighted Geometric Mean", *Wikipedia*, https://en.wikipedia.org/wiki/Weighted_geometric_mean.
Examples
--------
>>> from scipy.stats import gmean
>>> gmean([1, 4])
2.0
>>> gmean([1, 2, 3, 4, 5, 6, 7])
3.3800151591412964
"""
if not isinstance(a, np.ndarray):
# if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
if weights is not None:
weights = np.asanyarray(weights, dtype=dtype)
return np.exp(np.average(log_a, axis=axis, weights=weights))
@_axis_nan_policy_factory(
lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,
result_unpacker=lambda x: (x,), kwd_samples=['weights'])
def hmean(a, axis=0, dtype=None, *, weights=None):
"""Calculate the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
weights : array_like, optional
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given `axis`) or of the same shape as `a`.
Default is None, which gives each value a weight of 1.0.
.. versionadded:: 1.9
Returns
-------
hmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
References
----------
.. [1] "Weighted Harmonic Mean", *Wikipedia*,
https://en.wikipedia.org/wiki/Harmonic_mean#Weighted_harmonic_mean
.. [2] Ferger, F., "The nature and use of the harmonic mean", Journal of
the American Statistical Association, vol. 26, pp. 36-40, 1931
Examples
--------
>>> from scipy.stats import hmean
>>> hmean([1, 4])
1.6000000000000001
>>> hmean([1, 2, 3, 4, 5, 6, 7])
2.6997245179063363
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
a = np.ma.asarray(a, dtype=dtype)
else:
a = np.asarray(a, dtype=dtype)
if np.all(a >= 0):
# Harmonic mean only defined if greater than or equal to zero.
if weights is not None:
weights = np.asanyarray(weights, dtype=dtype)
with np.errstate(divide='ignore'):
return 1.0 / np.average(1.0 / a, axis=axis, weights=weights)
else:
raise ValueError("Harmonic mean only defined if all elements greater "
"than or equal to zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""Return an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
ModeResult(mode=array([3]), count=array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
if a.dtype == object and np.nan in set(a.ravel()):
# Fall back to a slower method since np.unique does not work with NaN
scores = set(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.sum(template, axis, keepdims=True)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mode1D(a):
vals, cnts = np.unique(a, return_counts=True)
return vals[cnts.argmax()], cnts.max()
# np.apply_along_axis will convert the _mode1D tuples to a numpy array,
# casting types in the process.
# This recreates the results without that issue
# View of a, rotated so the requested axis is last
a_view = np.moveaxis(a, axis, -1)
inds = np.ndindex(a_view.shape[:-1])
modes = np.empty(a_view.shape[:-1], dtype=a.dtype)
counts = np.empty(a_view.shape[:-1], dtype=np.int_)
for ind in inds:
modes[ind], counts[ind] = _mode1D(a_view[ind])
newshape = list(a.shape)
newshape[axis] = 1
return ModeResult(modes.reshape(newshape), counts.reshape(newshape))
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : ndarray
Trimmed mean.
See Also
--------
trim_mean : Returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, axis)
am = _mask_to_limits(a, limits, inclusive)
mean = np.ma.filled(am.mean(axis=axis), fill_value=np.nan)
return mean if mean.ndim > 0 else mean.item()
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed variance.
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float)
if limits is None:
return a.var(ddof=ddof, axis=axis)
am = _mask_to_limits(a, limits, inclusive)
amnan = am.filled(fill_value=np.nan)
return np.nanvar(amnan, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed minimum.
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
Array of values.
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmin : float, int or ndarray
Trimmed minimum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed maximum.
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
Array of values.
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmax : float, int or ndarray
Trimmed maximum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed sample standard deviation.
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Trimmed sample standard deviation.
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Trimmed standard error of the mean.
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
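# Illustrative sketch (not part of the original module): `tsem` is the
# trimmed standard deviation divided by the square root of the trimmed
# sample size. The helper name and data are assumptions for this example.
def _tsem_decomposition_sketch():
    import numpy as np
    x = np.arange(20)
    t = x[(x >= 3) & (x <= 17)]                    # manual trimming
    manual = np.std(t, ddof=1) / np.sqrt(t.size)
    assert np.isclose(tsem(x, (3, 17)), manual)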
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""Calculate the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of
points. It is often used to calculate coefficients of skewness and kurtosis
due to its close relationship with them.
Parameters
----------
a : array_like
Input array.
moment : int or array_like of ints, optional
Order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations; no degrees of freedom correction is done.
See Also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
Examples
--------
>>> from scipy.stats import moment
>>> moment([1, 2, 3, 4, 5], moment=1)
0.0
>>> moment([1, 2, 3, 4, 5], moment=2)
2.0
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
moment_shape = list(a.shape)
del moment_shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
# empty array, return nan(s) with shape matching `moment`
out_shape = (moment_shape if np.isscalar(moment)
else [len(moment)] + moment_shape)
if len(out_shape) == 0:
return dtype(np.nan)
else:
return np.full(out_shape, np.nan, dtype=dtype)
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mean = a.mean(axis, keepdims=True)
mmnt = [_moment(a, i, axis, mean=mean) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
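# Illustrative sketch (not part of the original module): for 1-D input,
# `moment(a, k)` agrees with the naive definition mean((a - a.mean())**k);
# the exponentiation by squares in `_moment` below is only an efficiency
# detail. The helper name and data are assumptions for this example.
def _moment_naive_equivalence_sketch():
    import numpy as np
    a = np.array([2., 8., 0., 4., 1., 9., 9., 0.])
    for k in (2, 3, 4):
        naive = np.mean((a - a.mean()) ** k)
        assert np.isclose(moment(a, k), naive)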
# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True)
def _moment(a, moment, axis, *, mean=None):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0 or moment == 1:
# By definition the zeroth moment about the mean is 1, and the first
# moment is 0.
shape = list(a.shape)
del shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
if len(shape) == 0:
return dtype(1.0 if moment == 0 else 0.0)
else:
return (np.ones(shape, dtype=dtype) if moment == 0
else np.zeros(shape, dtype=dtype))
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n - 1) / 2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
mean = a.mean(axis, keepdims=True) if mean is None else mean
a_zero_mean = a - mean
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
r"""Compute the sample skewness of a data set.
For normally distributed data, the skewness should be about zero. For
unimodal continuous distributions, a skewness value greater than zero means
that there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to zero, statistically speaking.
Parameters
----------
a : ndarray
Input array.
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
Notes
-----
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.
.. math::
g_1=\frac{m_3}{m_2^{3/2}}
where
.. math::
m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i
is the biased sample :math:`i\texttt{th}` central moment, and
:math:`\bar{x}` is
the sample mean. If ``bias`` is False, the calculations are
corrected for bias and the value computed is the adjusted
Fisher-Pearson standardized moment coefficient, i.e.
.. math::
G_1=\frac{k_3}{k_2^{3/2}}=
\frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
Examples
--------
>>> from scipy.stats import skew
>>> skew([1, 2, 3, 4, 5])
0.0
>>> skew([2, 8, 0, 4, 1, 9, 9, 0])
0.2650554122698573
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m3 = _moment(a, 3, axis, mean=mean)
with np.errstate(all='ignore'):
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
vals = np.where(zero, 0, m3 / m2**1.5)
if not bias:
can_correct = ~zero & (n > 2)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
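# Illustrative sketch (not part of the original module): the biased skewness
# is g1 = m3 / m2**1.5, and the bias-corrected value applies the factor
# sqrt(n*(n-1)) / (n-2), matching the docstring formulas above. The helper
# name and data are assumptions for this example.
def _skew_formula_sketch():
    import numpy as np
    x = np.array([2., 8., 0., 4., 1., 9., 9., 0.])
    n = x.size
    m2 = np.mean((x - x.mean()) ** 2)
    m3 = np.mean((x - x.mean()) ** 3)
    g1 = m3 / m2 ** 1.5
    G1 = np.sqrt(n * (n - 1)) / (n - 2) * g1
    assert np.isclose(skew(x), g1)
    assert np.isclose(skew(x, bias=False), G1)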
@_axis_nan_policy_factory(
lambda x: x, result_unpacker=lambda x: (x,), n_outputs=1
)
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""Compute the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
Data for which the kurtosis is calculated.
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
In Fisher's definition, the kurtosis of the normal distribution is zero.
In the following example, the kurtosis is close to zero, because it was
calculated from the dataset, not from the continuous distribution.
>>> from scipy.stats import norm, kurtosis
>>> data = norm.rvs(size=1000, random_state=3)
>>> kurtosis(data)
-0.06928694200380558
The distribution with a higher kurtosis has a heavier tail.
The zero valued kurtosis of the normal distribution in Fisher's definition
can serve as a reference point.
>>> import matplotlib.pyplot as plt
>>> import scipy.stats as stats
>>> from scipy.stats import kurtosis
>>> x = np.linspace(-5, 5, 100)
>>> ax = plt.subplot()
>>> distnames = ['laplace', 'norm', 'uniform']
>>> for distname in distnames:
... if distname == 'uniform':
... dist = getattr(stats, distname)(loc=-2, scale=4)
... else:
... dist = getattr(stats, distname)
... data = dist.rvs(size=1000)
... kur = kurtosis(data, fisher=True)
... y = dist.pdf(x)
... ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3)))
... ax.legend()
The Laplace distribution has a heavier tail than the normal distribution.
The uniform distribution (which has negative kurtosis) has the thinnest
tail.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m4 = _moment(a, 4, axis, mean=mean)
with np.errstate(all='ignore'):
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
vals = np.where(zero, 0, m4 / m2**2.0)
if not bias:
can_correct = ~zero & (n > 3)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
return vals - 3 if fisher else vals
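# Illustrative sketch (not part of the original module): with ``bias=True``
# the Pearson kurtosis is m4 / m2**2 and Fisher's definition subtracts 3.
# The helper name and data are assumptions for this example.
def _kurtosis_definitions_sketch():
    import numpy as np
    x = np.array([2., 8., 0., 4., 1., 9., 9., 0.])
    m2 = np.mean((x - x.mean()) ** 2)
    m4 = np.mean((x - x.mean()) ** 4)
    pearson = m4 / m2 ** 2
    assert np.isclose(kurtosis(x, fisher=False), pearson)
    assert np.isclose(kurtosis(x), pearson - 3.0)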
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""Compute several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected
for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
nobs : int or ndarray of ints
Number of observations (length of data along `axis`).
When 'omit' is chosen as nan_policy, the length along each axis
slice is counted separately.
minmax: tuple of ndarrays or floats
Minimum and maximum value of `a` along the given axis.
mean : ndarray or float
Arithmetic mean of `a` along the given axis.
variance : ndarray or float
Unbiased variance of `a` along the given axis; denominator is number
of observations minus one.
skewness : ndarray or float
Skewness of `a` along the given axis, based on moment calculations
with denominator equal to the number of observations, i.e. no degrees
of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher) of `a` along the given axis. The kurtosis is
normalized so that it is zero for the normal distribution. No
degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5,
variance=9.166666666666666, skewness=0.0,
kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([2., 3.]), variance=array([2., 2.]),
skewness=array([0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
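# Illustrative sketch (not part of the original module): `describe` simply
# bundles the individual statistics computed elsewhere in this module.
# The helper name and data are assumptions for this example.
def _describe_consistency_sketch():
    import numpy as np
    a = np.arange(10)
    res = describe(a)
    assert res.nobs == a.size
    assert res.minmax == (a.min(), a.max())
    assert np.isclose(res.mean, np.mean(a))
    assert np.isclose(res.variance, np.var(a, ddof=1))
    assert np.isclose(res.skewness, skew(a))
    assert np.isclose(res.kurtosis, kurtosis(a))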
#####################################
# NORMALITY TESTS #
#####################################
def _normtest_finish(z, alternative):
"""Common code between all the normality-test functions."""
if alternative == 'less':
prob = distributions.norm.cdf(z)
elif alternative == 'greater':
prob = distributions.norm.sf(z)
elif alternative == 'two-sided':
prob = 2 * distributions.norm.sf(np.abs(z))
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if z.ndim == 0:
z = z[()]
return z, prob
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
"""Test whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the skewness of the distribution underlying the sample
is different from that of the normal distribution (i.e. 0)
* 'less': the skewness of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the skewness of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The p-value for the hypothesis test.
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
Examples
--------
>>> from scipy.stats import skewtest
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)
>>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)
>>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='less')
SkewtestResult(statistic=1.0108048609177787, pvalue=0.8439450819289052)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='greater')
SkewtestResult(statistic=1.0108048609177787, pvalue=0.15605491807109484)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis, alternative)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = a.shape[axis]
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(*_normtest_finish(Z, alternative))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
"""Test whether a dataset has normal kurtosis.
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution.
Parameters
----------
a : array
Array of the sample data.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the kurtosis of the distribution underlying the sample
is different from that of the normal distribution
* 'less': the kurtosis of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the kurtosis of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The p-value for the hypothesis test.
Notes
-----
Valid only for n>20. This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
Examples
--------
>>> from scipy.stats import kurtosistest
>>> kurtosistest(list(range(20)))
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
>>> kurtosistest(list(range(20)), alternative='less')
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.04402169166264174)
>>> kurtosistest(list(range(20)), alternative='greater')
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.9559783083373583)
>>> rng = np.random.default_rng()
>>> s = rng.normal(0, 1, 1000)
>>> kurtosistest(s)
KurtosistestResult(statistic=-1.475047944490622, pvalue=0.14019965402996987)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis, alternative)
n = a.shape[axis]
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
np.power((1-2.0/A)/np.abs(denom), 1/3.0))
if np.any(denom == 0):
msg = "Test statistic not defined in some cases due to division by " \
"zero. Return nan in that case..."
warnings.warn(msg, RuntimeWarning)
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(*_normtest_finish(Z, alternative))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""Test whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the sample to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> pts = 1000
>>> a = rng.normal(0, 1, size=pts)
>>> b = rng.normal(2, 1, size=pts)
>>> x = np.concatenate((a, b))
>>> k2, p = stats.normaltest(x)
>>> alpha = 1e-3
>>> print("p = {:g}".format(p))
p = 8.4713e-19
>>> if p < alpha: # null hypothesis: x comes from a normal distribution
... print("The null hypothesis can be rejected")
... else:
... print("The null hypothesis cannot be rejected")
The null hypothesis can be rejected
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
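# Illustrative sketch (not part of the original module): the `normaltest`
# statistic is literally the sum of the squared z-scores returned by
# `skewtest` and `kurtosistest`. The seed, sample size, and helper name are
# assumptions for this example (n >= 20 keeps `kurtosistest` warning-free).
def _normaltest_composition_sketch():
    import numpy as np
    rng = np.random.default_rng(12345)
    x = rng.normal(size=100)
    s = skewtest(x).statistic
    k = kurtosistest(x).statistic
    assert np.isclose(normaltest(x).statistic, s * s + k * k)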
Jarque_beraResult = namedtuple('Jarque_beraResult', ('statistic', 'pvalue'))
def jarque_bera(x):
"""Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
Economics Letters, vol. 6, pp. 255-259.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = rng.normal(0, 1, 100000)
>>> jarque_bera_test = stats.jarque_bera(x)
>>> jarque_bera_test
Jarque_beraResult(statistic=3.3415184718131554, pvalue=0.18810419594996775)
>>> jarque_bera_test.statistic
3.3415184718131554
>>> jarque_bera_test.pvalue
0.18810419594996775
"""
x = np.asarray(x)
n = x.size
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return Jarque_beraResult(jb_value, p)
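# Illustrative sketch (not part of the original module): the Jarque-Bera
# statistic is n/6 * (S**2 + (K - 3)**2 / 4) with biased skewness S and
# Pearson kurtosis K, exactly as computed above. The seed, sample size, and
# helper name are assumptions for this example.
def _jarque_bera_manual_sketch():
    import numpy as np
    rng = np.random.default_rng(0)
    x = rng.normal(size=500)
    d = x - x.mean()
    S = np.mean(d**3) / np.mean(d**2) ** 1.5
    K = np.mean(d**4) / np.mean(d**2) ** 2
    jb = x.size / 6 * (S**2 + (K - 3) ** 2 / 4)
    assert np.isclose(jarque_bera(x).statistic, jb)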
#####################################
# FREQUENCY FUNCTIONS #
#####################################
# deindent to work around numpy/gh-16202
@np.deprecate(
message="`itemfreq` is deprecated and will be removed in a "
"future version. Use instead `np.unique(..., return_counts=True)`")
def itemfreq(a):
"""
Return a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
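# Illustrative sketch (not part of the original module): the replacement
# suggested in the deprecation message builds an equivalent (K, 2) table
# with `np.unique`. The helper name and data are assumptions for this example.
def _itemfreq_replacement_sketch():
    import numpy as np
    a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
    values, counts = np.unique(a, return_counts=True)
    table = np.column_stack((values, counts))   # same layout as itemfreq(a)
    return table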
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation_method`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
Specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
The following options are available (default is 'fraction'):
* 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``
* 'lower': ``i``
* 'higher': ``j``
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality
that `scoreatpercentile` provides, and it is significantly faster.
Therefore, users with numpy >= 1.9 are recommended to use `numpy.percentile`
instead.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted_ = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted_, per, interpolation_method, axis)
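# Illustrative sketch (not part of the original module): with the default
# 'fraction' interpolation and no `limit`, `scoreatpercentile` should agree
# with `numpy.percentile` (linear interpolation), which the Notes above
# recommend. The helper name and data are assumptions for this example.
def _scoreatpercentile_vs_numpy_sketch():
    import numpy as np
    a = np.arange(100)
    for per in (25, 37.5, 50, 90):
        assert np.isclose(scoreatpercentile(a, per), np.percentile(a, per))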
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted_, i,
interpolation_method, axis)
for i in per]
return np.array(score)
if not (0 <= per <= 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted_.ndim
idx = per / 100. * (sorted_.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted_.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""Compute the percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
Specifies the interpretation of the resulting score.
The following options are available (default is 'rank'):
* 'rank': Average percentage ranking of score. In case of multiple
matches, average the percentage rankings of all matching scores.
* 'weak': This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80% means that 80%
of values are less than or equal to the provided score.
* 'strict': Similar to "weak", except that only values that are
strictly less than the given score are counted.
* 'mean': The average of the "weak" and "strict" scores, often used
in testing. See https://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
if np.isnan(score):
return np.nan
a = np.asarray(a)
n = len(a)
if n == 0:
return 100.0
if kind == 'rank':
left = np.count_nonzero(a < score)
right = np.count_nonzero(a <= score)
pct = (right + left + (1 if right > left else 0)) * 50.0/n
return pct
elif kind == 'strict':
return np.count_nonzero(a < score) / n * 100
elif kind == 'weak':
return np.count_nonzero(a <= score) / n * 100
elif kind == 'mean':
pct = (np.count_nonzero(a < score)
+ np.count_nonzero(a <= score)) / n * 50
return pct
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
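# Illustrative sketch (not part of the original module): the 'strict',
# 'weak' and 'mean' kinds reduce to simple counts, as in the branches above.
# The helper name and data are assumptions for this example.
def _percentileofscore_kinds_sketch():
    import numpy as np
    a = np.array([1, 2, 3, 3, 4])
    score, n = 3, a.size
    strict = np.count_nonzero(a < score) / n * 100     # 40.0
    weak = np.count_nonzero(a <= score) / n * 100      # 80.0
    assert np.isclose(percentileofscore(a, score, kind='strict'), strict)
    assert np.isclose(percentileofscore(a, score, kind='weak'), weak)
    assert np.isclose(percentileofscore(a, score, kind='mean'),
                      (strict + weak) / 2)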
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
def _histogram(a, numbins=10, defaultlimits=None, weights=None,
printextras=False):
"""Create a histogram.
Separate the range into several bins and return the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if `defaultlimits` is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist are not always floats, convert to keep with old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""Return a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy.random import default_rng
>>> from scipy import stats
>>> rng = default_rng()
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""Return a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy.random import default_rng
>>> from scipy import stats
>>> rng = default_rng()
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / a.shape[0]
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""Compute the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
*args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
sLast = None
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
sLast = a.shape
if sLast:
for arr in arrays[:-1]:
if sLast != arr.shape:
return np.array(arrays, dtype=object)
return np.array(arrays)
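# Illustrative sketch (not part of the original module): the defining
# property checked inside the loop above is that the mean of each
# transformed array equals the sample variance (ddof=1) of the original
# data. The helper name and the data (taken from the docstring example)
# are assumptions for this example.
def _obrientransform_property_sketch():
    import numpy as np
    x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
    y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
    tx, ty = obrientransform(x, y)
    assert np.isclose(np.mean(tx), np.var(x, ddof=1))
    assert np.isclose(np.mean(ty), np.var(y, ddof=1))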
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""Compute standard error of the mean.
Calculate the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` is different to the default (0) used by other
ddof containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
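# Illustrative sketch (not part of the original module): `sem` is simply the
# standard deviation (with ``ddof``, default 1) divided by sqrt(n) along the
# chosen axis. The helper name and data are assumptions for this example.
def _sem_decomposition_sketch():
    import numpy as np
    a = np.arange(20).reshape(5, 4)
    manual = np.std(a, axis=0, ddof=1) / np.sqrt(a.shape[0])
    assert np.allclose(sem(a), manual)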
def _isconst(x):
"""
Check if all values in x are the same. nans are ignored.
x must be a 1d array.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([True])
else:
return (y[0] == y).all(keepdims=True)
def _quiet_nanmean(x):
"""
Compute nanmean for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.mean(y, keepdims=True)
def _quiet_nanstd(x, ddof=0):
"""
Compute nanstd for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.std(y, keepdims=True, ddof=ddof)
def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
"""
Compute the z score.
Compute the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that when the value is 'omit',
nans in the input also propagate to the output, but they do not affect
the z-scores computed for the non-nan values.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
An example with `nan_policy='omit'`:
>>> x = np.array([[25.11, 30.10, np.nan, 32.02, 43.15],
... [14.95, 16.06, 121.25, 94.35, 29.81]])
>>> stats.zscore(x, axis=1, nan_policy='omit')
array([[-1.13490897, -0.37830299, nan, -0.08718406, 1.60039602],
[-0.91611681, -0.89090508, 1.4983032 , 0.88731639, -0.5785977 ]])
"""
return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy)
def gzscore(a, *, axis=0, ddof=0, nan_policy='propagate'):
"""
Compute the geometric standard score.
Compute the geometric z score of each strictly positive value in the
sample, relative to the geometric mean and standard deviation.
Mathematically the geometric z score can be evaluated as::
gzscore = log(a/gmu) / log(gsigma)
where ``gmu`` (resp. ``gsigma``) is the geometric mean (resp. standard
deviation).
Parameters
----------
a : array_like
Sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that when the value is 'omit',
nans in the input also propagate to the output, but they do not affect
the geometric z scores computed for the non-nan values.
Returns
-------
gzscore : array_like
The geometric z scores, standardized by geometric mean and geometric
standard deviation of input array `a`.
See Also
--------
gmean : Geometric mean
gstd : Geometric standard deviation
zscore : Standard score
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses ``asanyarray`` instead of
``asarray`` for parameters).
.. versionadded:: 1.8
Examples
--------
Draw samples from a log-normal distribution:
>>> from scipy.stats import zscore, gzscore
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> mu, sigma = 3., 1. # mean and standard deviation
>>> x = rng.lognormal(mu, sigma, size=500)
Display the histogram of the samples:
>>> fig, ax = plt.subplots()
>>> ax.hist(x, 50)
>>> plt.show()
Display the histogram of the samples standardized by the classical zscore.
Distribution is rescaled but its shape is unchanged.
>>> fig, ax = plt.subplots()
>>> ax.hist(zscore(x), 50)
>>> plt.show()
Demonstrate that the distribution of geometric zscores is rescaled and
quasinormal:
>>> fig, ax = plt.subplots()
>>> ax.hist(gzscore(x), 50)
>>> plt.show()
"""
a = np.asanyarray(a)
log = ma.log if isinstance(a, ma.MaskedArray) else np.log
return zscore(log(a), axis=axis, ddof=ddof, nan_policy=nan_policy)
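# Illustrative sketch (not part of the original module): for strictly
# positive, unmasked input, `gzscore` is just `zscore` applied to the log of
# the data. The helper name and data are assumptions for this example.
def _gzscore_log_equivalence_sketch():
    import numpy as np
    x = np.array([1., 2., 4., 8., 16.])
    assert np.allclose(gzscore(x), zscore(np.log(x)))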
def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):
"""
Calculate the relative z-scores.
Return an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle the occurrence of nans in `compare`.
'propagate' returns nan, 'raise' raises an exception, 'omit'
performs the calculations ignoring nan values. Default is
'propagate'. Note that when the value is 'omit', nans in `scores`
also propagate to the output, but they do not affect the z-scores
computed for the non-nan values.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
a = np.asanyarray(compare)
if a.size == 0:
return np.empty(a.shape)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
if axis is None:
mn = _quiet_nanmean(a.ravel())
std = _quiet_nanstd(a.ravel(), ddof=ddof)
isconst = _isconst(a.ravel())
else:
mn = np.apply_along_axis(_quiet_nanmean, axis, a)
std = np.apply_along_axis(_quiet_nanstd, axis, a, ddof=ddof)
isconst = np.apply_along_axis(_isconst, axis, a)
else:
mn = a.mean(axis=axis, keepdims=True)
std = a.std(axis=axis, ddof=ddof, keepdims=True)
if axis is None:
isconst = (a.item(0) == a).all()
else:
isconst = (_first(a, axis) == a).all(axis=axis, keepdims=True)
# Set std deviations that are 0 to 1 to avoid division by 0.
std[isconst] = 1.0
z = (scores - mn) / std
# Set the outputs associated with a constant input to nan.
z[np.broadcast_to(isconst, z.shape)] = np.nan
return z
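# Illustrative sketch (not part of the original module): without nans,
# `zmap` standardizes `scores` by the mean and (ddof=0) standard deviation
# of `compare`. The helper name and data are assumptions for this example.
def _zmap_manual_sketch():
    import numpy as np
    scores = np.array([0.5, 2.0, 2.5, 3.0])
    compare = np.array([0., 1., 2., 3., 4.])
    manual = (scores - compare.mean()) / compare.std()   # ddof=0, the default
    assert np.allclose(zmap(scores, compare), manual)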
def gstd(a, axis=0, ddof=1):
"""
Calculate the geometric standard deviation of an array.
The geometric standard deviation describes the spread of a set of numbers
where the geometric mean is preferred. It is a multiplicative factor, and
so a dimensionless quantity.
It is defined as the exponent of the standard deviation of ``log(a)``.
Mathematically the population geometric standard deviation can be
evaluated as::
gstd = exp(std(log(a)))
.. versionadded:: 1.3.0
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int, tuple or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degree of freedom correction in the calculation of the
geometric standard deviation. Default is 1.
Returns
-------
ndarray or float
An array of the geometric standard deviation. If `axis` is None or `a`
is a 1d array a float is returned.
See Also
--------
gmean : Geometric mean
numpy.std : Standard deviation
Notes
-----
As the calculation requires the use of logarithms, the geometric standard
deviation only supports strictly positive values. Any non-positive or
infinite values will raise a `ValueError`.
The geometric standard deviation is sometimes confused with the exponent of
the standard deviation, ``exp(std(a))``. Instead the geometric standard
deviation is ``exp(std(log(a)))``.
The default value for `ddof` is different to the default value (0) used
by other ddof containing functions, such as ``np.std`` and ``np.nanstd``.
References
----------
.. [1] Kirkwood, T. B., "Geometric means and measures of dispersion",
Biometrics, vol. 35, pp. 908-909, 1979
Examples
--------
Find the geometric standard deviation of a log-normally distributed sample.
Note that the standard deviation of the distribution is one, on a
log scale this evaluates to approximately ``exp(1)``.
>>> from scipy.stats import gstd
>>> rng = np.random.default_rng()
>>> sample = rng.lognormal(mean=0, sigma=1, size=1000)
>>> gstd(sample)
2.810010162475324
Compute the geometric standard deviation of a multidimensional array and
of a given axis.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> gstd(a, axis=None)
2.2944076136018947
>>> gstd(a, axis=2)
array([[1.82424757, 1.22436866, 1.13183117],
[1.09348306, 1.07244798, 1.05914985]])
>>> gstd(a, axis=(1,2))
array([2.12939215, 1.22120169])
The geometric standard deviation further handles masked arrays.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> ma = np.ma.masked_where(a > 16, a)
>>> ma
masked_array(
data=[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[--, --, --, --],
[--, --, --, --]]],
mask=[[[False, False, False, False],
[False, False, False, False],
[False, False, False, False]],
[[False, False, False, False],
[ True, True, True, True],
[ True, True, True, True]]],
fill_value=999999)
>>> gstd(ma, axis=2)
masked_array(
data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],
[1.0934830582350938, --, --]],
mask=[[False, False, False],
[False, True, True]],
fill_value=999999)
"""
a = np.asanyarray(a)
log = ma.log if isinstance(a, ma.MaskedArray) else np.log
try:
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
return np.exp(np.std(log(a), axis=axis, ddof=ddof))
except RuntimeWarning as w:
if np.isinf(a).any():
raise ValueError(
'Infinite value encountered. The geometric standard deviation '
'is defined for strictly positive values only.'
) from w
a_nan = np.isnan(a)
a_nan_any = a_nan.any()
# exclude NaN's from negativity check, but
# avoid expensive masking for arrays with no NaN
if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or
(not a_nan_any and np.less_equal(a, 0).any())):
raise ValueError(
'Non positive value encountered. The geometric standard '
'deviation is defined for strictly positive values only.'
) from w
elif 'Degrees of freedom <= 0 for slice' == str(w):
raise ValueError(w) from w
else:
# Remaining warnings don't need to be exceptions.
return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof))
except TypeError as e:
raise ValueError(
'Invalid array input. The inputs could not be '
'safely coerced to any supported types') from e
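# Illustrative sketch (editorial addition, not part of the public API): the
# identity documented above, gstd(a) == exp(std(log(a), ddof=1)), can be
# checked directly.  The helper is hypothetical and never called from this
# module; it relies only on `np` and `gstd`, both available in this file.
def _gstd_identity_demo():
    a = np.array([1.0, 2.0, 4.0, 8.0])
    manual = np.exp(np.std(np.log(a), ddof=1))
    return manual, gstd(a)  # the two values agree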
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
interpolation='linear', keepdims=False):
r"""
Compute the interquartile range of the data along the specified axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
``(25, 75)``. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
* 'raw' : No scaling, just return the raw IQR.
**Deprecated!** Use ``scale=1`` instead.
* 'normal' : Scale by
:math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`.
The default is 1.0. The use of ``scale='raw'`` is deprecated.
Array-like `scale` is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
interpolation : str, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points ``i`` and ``j``.
The following options are available (default is 'linear'):
* 'linear': ``i + (j - i)*fraction``, where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``.
* 'lower': ``i``.
* 'higher': ``j``.
* 'nearest': ``i`` or ``j`` whichever is nearest.
* 'midpoint': ``(i + j)/2``.
For NumPy >= 1.22.0, the additional options provided by the ``method``
keyword of `numpy.percentile` are also valid.
keepdims : bool, optional
If this is set to True, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
if scale_key == 'raw':
warnings.warn(
"use of scale='raw' is deprecated, use scale=1.0 instead",
np.VisibleDeprecationWarning
)
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = np.nanpercentile
else:
percentile_func = np.percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
if np.isnan(rng).any():
raise ValueError("range must not contain NaNs")
rng = sorted(rng)
if NumpyVersion(np.__version__) >= '1.22.0':
pct = percentile_func(x, rng, axis=axis, method=interpolation,
keepdims=keepdims)
else:
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
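# Illustrative sketch (editorial addition, not part of the public API): with
# the defaults, `iqr` reduces to the difference of the 75th and 25th
# percentiles, and `scale='normal'` only divides that difference by the
# conversion factor stored in `_scale_conversions`.  The helper is
# hypothetical and never called from this module.
def _iqr_percentile_demo():
    x = np.arange(20.0)
    manual = np.percentile(x, 75) - np.percentile(x, 25)
    rescaled = iqr(x, scale='normal') * _scale_conversions['normal']
    return manual, iqr(x), rescaled  # all three values agree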
def _mad_1d(x, center, nan_policy):
# Median absolute deviation for 1-d array x.
# This is a helper function for `median_abs_deviation`; it assumes its
# arguments have been validated already. In particular, x must be a
# 1-d numpy array, center must be callable, and if nan_policy is not
# 'propagate', it is assumed to be 'omit', because 'raise' is handled
# in `median_abs_deviation`.
# No warning is generated if x is empty or all nan.
isnan = np.isnan(x)
if isnan.any():
if nan_policy == 'propagate':
return np.nan
x = x[~isnan]
if x.size == 0:
# MAD of an empty array is nan.
return np.nan
# Edge cases have been handled, so do the basic MAD calculation.
med = center(x)
mad = np.median(np.abs(x - med))
return mad
def median_abs_deviation(x, axis=0, center=np.median, scale=1.0,
nan_policy='propagate'):
r"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.5.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the range is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
A function that will return the central value. The default is to use
np.median. Any user defined function used will need to have the
function signature ``func(arr, axis)``.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The default is 1.0. The string "normal" is also accepted,
and results in `scale` being the standard normal quantile function at
0.75, which is approximately 0.67449.
Array-like scale is also allowed, as long as it broadcasts correctly
to the output such that ``out / scale`` is a valid operation. The
output dimensions depend on the input array, `x`, and the `axis`
argument.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean - it will not calculate the *mean*
absolute deviation.
The input array may contain `inf`, but if `center` returns `inf`, the
corresponding MAD for that data will be `nan`.
References
----------
.. [1] "Median absolute deviation",
https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale",
https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_abs_deviation` with ``np.std``,
the latter is affected when we change a single value of an array to have an
outlier value while the MAD hardly changes:
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_abs_deviation(x)
0.82832610097857
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_abs_deviation(x)
0.8323442311590675
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_abs_deviation(x)
array([3.5, 2.5, 1.5])
>>> stats.median_abs_deviation(x, axis=None)
2.0
Scale normal example:
>>> x = stats.norm.rvs(size=1000000, scale=2, random_state=123456)
>>> stats.median_abs_deviation(x)
1.3487398527041636
>>> stats.median_abs_deviation(x, scale='normal')
1.9996446978061115
"""
if not callable(center):
raise TypeError("The argument 'center' must be callable. The given "
f"value {repr(center)} is not callable.")
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
if scale.lower() == 'normal':
scale = 0.6744897501960817 # special.ndtri(0.75)
else:
raise ValueError(f"{scale} is not a valid scale value.")
x = asarray(x)
# Consistent with `np.var` and `np.std`.
if not x.size:
if axis is None:
return np.nan
nan_shape = tuple(item for i, item in enumerate(x.shape) if i != axis)
if nan_shape == ():
# Return nan, not array(nan)
return np.nan
return np.full(nan_shape, np.nan)
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan:
if axis is None:
mad = _mad_1d(x.ravel(), center, nan_policy)
else:
mad = np.apply_along_axis(_mad_1d, axis, x, center, nan_policy)
else:
if axis is None:
med = center(x, axis=None)
mad = np.median(np.abs(x - med))
else:
# Wrap the call to center() in expand_dims() so it acts like
# keepdims=True was used.
med = np.expand_dims(center(x, axis=axis), axis)
mad = np.median(np.abs(x - med), axis=axis)
return mad / scale
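# Illustrative sketch (editorial addition, not part of the public API): the
# MAD is the median of absolute deviations from the median, and
# `scale='normal'` divides by ndtri(0.75) so the result estimates the
# standard deviation of normal data.  The helper is hypothetical and never
# called from this module.
def _mad_demo():
    x = np.array([1.0, 1.0, 2.0, 2.0, 4.0, 6.0, 9.0])
    manual = np.median(np.abs(x - np.median(x)))
    return (manual,
            median_abs_deviation(x),                  # equals `manual`
            median_abs_deviation(x, scale='normal'))  # manual / 0.67449...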
# Keep the top newline so that the message does not show up on the stats page
_median_absolute_deviation_deprec_msg = """
To preserve the existing default behavior, use
`scipy.stats.median_abs_deviation(..., scale=1/1.4826)`.
The value 1.4826 is not numerically precise for scaling
with a normal distribution. For a numerically precise value, use
`scipy.stats.median_abs_deviation(..., scale='normal')`.
"""
# Due to numpy/gh-16349 we need to unindent the entire docstring
@np.deprecate(old_name='median_absolute_deviation',
new_name='median_abs_deviation',
message=_median_absolute_deviation_deprec_msg)
def median_absolute_deviation(x, axis=0, center=np.median, scale=1.4826,
nan_policy='propagate'):
r"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.3.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the range is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
A function that will return the central value. The default is to use
np.median. Any user defined function used will need to have the function
signature ``func(arr, axis)``.
scale : scalar, optional
The scaling factor applied to the MAD. The default scale (1.4826)
ensures consistency with the standard deviation for normally distributed
data.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean - it will not calculate the *mean*
absolute deviation.
References
----------
.. [1] "Median absolute deviation",
https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale",
https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_absolute_deviation` with ``np.std``,
the latter is affected when we change a single value of an array to have an
outlier value while the MAD hardly changes:
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_absolute_deviation(x)
1.2280762773108278
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_absolute_deviation(x)
1.2340335571164334
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_absolute_deviation(x)
array([5.1891, 3.7065, 2.2239])
>>> stats.median_absolute_deviation(x, axis=None)
2.9652
"""
if isinstance(scale, str):
if scale.lower() == 'raw':
warnings.warn(
"use of scale='raw' is deprecated, use scale=1.0 instead",
np.VisibleDeprecationWarning
)
scale = 1.0
if not isinstance(scale, str):
scale = 1 / scale
return median_abs_deviation(x, axis=axis, center=center, scale=scale,
nan_policy=nan_policy)
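# Illustrative sketch (editorial addition, not part of the public API): as the
# deprecation message above states, the old default scale of 1.4826
# corresponds to median_abs_deviation(..., scale=1/1.4826).  The helper is
# hypothetical and never called from this module (calling it emits the
# deprecation warning for `median_absolute_deviation`).
def _mad_deprecation_demo():
    x = np.array([1.0, 2.0, 4.0, 7.0, 11.0])
    return (median_absolute_deviation(x),
            median_abs_deviation(x, scale=1/1.4826),
            1.4826 * median_abs_deviation(x))  # all three values agree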
#####################################
# TRIMMING FUNCTIONS #
#####################################
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""Perform iterative sigma-clipping of array elements.
Starting from the full sample, all elements outside the critical range are
removed, i.e. all elements of the current sample ``c`` that satisfy either of
the following conditions::
c < mean(c) - std(c)*low
c > mean(c) + std(c)*high
The iteration continues with the updated sample until no
elements are outside the (updated) range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value used for clipping.
upper : float
Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std * low
critupper = c_mean + c_std * high
c = c[(c >= critlower) & (c <= critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
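# Illustrative sketch (editorial addition, not part of the public API): a
# single clipping pass keeps the values inside mean +/- std*factor, and
# `sigmaclip` repeats that pass until nothing more is removed.  For this
# small data set one pass already converges.  The helper is hypothetical and
# never called from this module.
def _sigmaclip_one_pass_demo():
    a = np.array([10.0, 10.2, 9.9, 10.1, 30.0])
    lower = a.mean() - a.std() * 1.5
    upper = a.mean() + a.std() * 1.5
    kept = a[(a >= lower) & (a <= upper)]
    return kept, sigmaclip(a, 1.5, 1.5).clipped  # both drop only the 30.0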
def trimboth(a, proportiontocut, axis=0):
"""Slice off a proportion of items from both ends of an array.
Slice off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slice off less if proportion results in a non-integer slice index (i.e.
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[tuple(sl)]
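# Illustrative sketch (editorial addition, not part of the public API):
# trimming 20% from each end of 10 values removes int(0.2 * 10) = 2 values
# per tail, leaving 6; only the retained set, not its order, is defined.  The
# helper is hypothetical and never called from this module.
def _trimboth_demo():
    a = np.arange(10)
    trimmed = trimboth(a, 0.2)
    return trimmed.shape, sorted(trimmed)  # (6,) and [2, 3, 4, 5, 6, 7]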
def trim1(a, proportiontocut, tail='right', axis=0):
"""Slice off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slice off less if proportion results in a non-integer slice index
(i.e. conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution.
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trim1(a, 0.5, 'left')
>>> b
array([10, 11, 12, 13, 14, 16, 15, 17, 18, 19])
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut`).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off of both tails of the distribution.
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : Compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[tuple(sl)], axis=axis)
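# Illustrative sketch (editorial addition, not part of the public API):
# `trim_mean` is simply the mean of the values that `trimboth` retains.  The
# helper is hypothetical and never called from this module.
def _trim_mean_demo():
    x = np.arange(20)
    return trim_mean(x, 0.1), np.mean(trimboth(x, 0.1))  # both equal 9.5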
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
class F_onewayConstantInputWarning(RuntimeWarning):
"""
Warning generated by `f_oneway` when an input is constant, e.g.
each of the samples provided is a constant array.
"""
def __init__(self, msg=None):
if msg is None:
msg = ("Each of the input arrays is constant;"
"the F statistic is not defined or infinite")
self.args = (msg,)
class F_onewayBadInputSizesWarning(RuntimeWarning):
"""
Warning generated by `f_oneway` when an input has length 0,
or if all the inputs have length 1.
"""
pass
def _create_f_oneway_nan_result(shape, axis):
"""
This is a helper function for f_oneway for creating the return values
in certain degenerate conditions. It creates return values that are
all nan with the appropriate shape for the given `shape` and `axis`.
"""
axis = np.core.multiarray.normalize_axis_index(axis, len(shape))
shp = shape[:axis] + shape[axis+1:]
if shp == ():
f = np.nan
prob = np.nan
else:
f = np.full(shp, fill_value=np.nan)
prob = f.copy()
return F_onewayResult(f, prob)
def _first(arr, axis):
"""Return arr[..., 0:1, ...] where 0:1 is in the `axis` position."""
return np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis)
def f_oneway(*args, axis=0):
"""Perform one-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two arguments. If the arrays are multidimensional, then all the
dimensions of the array must be the same except for `axis`.
axis : int, optional
Axis of the input arrays along which the test is applied.
Default is 0.
Returns
-------
statistic : float
The computed F statistic of the test.
pvalue : float
The associated p-value from the F distribution.
Warns
-----
F_onewayConstantInputWarning
Raised if each of the input arrays is a constant array.
In this case the F statistic is either infinite or isn't defined,
so ``np.inf`` or ``np.nan`` is returned.
F_onewayBadInputSizesWarning
Raised if the length of any input array is 0, or if all the input
arrays have length 1. ``np.nan`` is returned for the F statistic
and the p-value in these cases.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still
be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) or
the Alexander-Govern test (`scipy.stats.alexandergovern`) although with
some loss of power.
The length of each group must be at least one, and there must be at
least one group with length greater than one. If these conditions
are not satisfied, a warning is generated and (``np.nan``, ``np.nan``)
is returned.
If each group contains constant values, and there exist at least two
groups with different values, the function generates a warning and
returns (``np.inf``, 0).
If all values in all groups are the same, the function generates a warning
and returns (``np.nan``, ``np.nan``).
The algorithm is from Heiman [2]_, pp.394-7.
References
----------
.. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
Chapter 14, 2014, http://vassarstats.net/textbook/
.. [2] G.W. Heiman, "Understanding research methods and statistics: An
integrated introduction for psychology", Houghton, Mifflin and
Company, 2001.
.. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> from scipy.stats import f_oneway
Here are some data [3]_ on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
F_onewayResult(statistic=7.121019471642447, pvalue=0.0002812242314534544)
`f_oneway` accepts multidimensional input arrays. When the inputs
are multidimensional and `axis` is not given, the test is performed
along the first axis of the input arrays. For the following data, the
test is performed three times, once for each column.
>>> a = np.array([[9.87, 9.03, 6.81],
... [7.18, 8.35, 7.00],
... [8.39, 7.58, 7.68],
... [7.45, 6.33, 9.35],
... [6.41, 7.10, 9.33],
... [8.00, 8.24, 8.44]])
>>> b = np.array([[6.35, 7.30, 7.16],
... [6.65, 6.68, 7.63],
... [5.72, 7.73, 6.72],
... [7.01, 9.19, 7.41],
... [7.75, 7.87, 8.30],
... [6.90, 7.97, 6.97]])
>>> c = np.array([[3.31, 8.77, 1.01],
... [8.25, 3.24, 3.62],
... [6.32, 8.81, 5.19],
... [7.48, 8.83, 8.91],
... [8.59, 6.01, 6.07],
... [3.07, 9.72, 7.48]])
>>> F, p = f_oneway(a, b, c)
>>> F
array([1.75676344, 0.03701228, 3.76439349])
>>> p
array([0.20630784, 0.96375203, 0.04733157])
"""
if len(args) < 2:
raise TypeError(f'at least two inputs are required; got {len(args)}.')
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
# We haven't explicitly validated axis, but if it is bad, this call of
# np.concatenate will raise np.AxisError. The call will raise ValueError
# if the dimensions of all the arrays, except the axis dimension, are not
# the same.
alldata = np.concatenate(args, axis=axis)
bign = alldata.shape[axis]
# Check this after forming alldata, so shape errors are detected
# and reported before checking for 0 length inputs.
if any(arg.shape[axis] == 0 for arg in args):
warnings.warn(F_onewayBadInputSizesWarning('at least one input '
'has length 0'))
return _create_f_oneway_nan_result(alldata.shape, axis)
# Must have at least one group with length greater than 1.
if all(arg.shape[axis] == 1 for arg in args):
msg = ('all input arrays have length 1. f_oneway requires that at '
'least one input has length greater than 1.')
warnings.warn(F_onewayBadInputSizesWarning(msg))
return _create_f_oneway_nan_result(alldata.shape, axis)
# Check if the values within each group are constant, and if the common
# value in at least one group is different from that in another group.
# Based on https://github.com/scipy/scipy/issues/11669
# If axis=0, say, and the groups have shape (n0, ...), (n1, ...), ...,
# then is_const is a boolean array with shape (num_groups, ...).
# It is True if the groups along the axis slice are each constant.
# In the typical case where each input array is 1-d, is_const is a
# 1-d array with length num_groups.
is_const = np.concatenate([(_first(a, axis) == a).all(axis=axis,
keepdims=True)
for a in args], axis=axis)
# all_const is a boolean array with shape (...) (see previous comment).
# It is True if the values within each group along the axis slice are
# the same (e.g. [[3, 3, 3], [5, 5, 5, 5], [4, 4, 4]]).
all_const = is_const.all(axis=axis)
if all_const.any():
warnings.warn(F_onewayConstantInputWarning())
# all_same_const is True if all the values in the groups along the axis=0
# slice are the same (e.g. [[3, 3, 3], [3, 3, 3, 3], [3, 3, 3]]).
all_same_const = (_first(alldata, axis) == alldata).all(axis=axis)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean(axis=axis, keepdims=True)
alldata -= offset
normalized_ss = _square_of_sums(alldata, axis=axis) / bign
sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset, axis=axis) / a.shape[axis]
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= normalized_ss
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / dfbn
msw = sswn / dfwn
with np.errstate(divide='ignore', invalid='ignore'):
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
# Fix any f values that should be inf or nan because the corresponding
# inputs were constant.
if np.isscalar(f):
if all_same_const:
f = np.nan
prob = np.nan
elif all_const:
f = np.inf
prob = 0.0
else:
f[all_const] = np.inf
prob[all_const] = 0.0
f[all_same_const] = np.nan
prob[all_same_const] = np.nan
return F_onewayResult(f, prob)
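# Illustrative sketch (editorial addition, not part of the public API): for
# small 1-d groups the F statistic can be recomputed from the textbook
# definition F = (SS_between / (k - 1)) / (SS_within / (N - k)).  The helper
# is hypothetical and never called from this module.
def _f_oneway_manual_demo():
    groups = [np.array([1.0, 2.0, 3.0]), np.array([2.0, 4.0, 6.0])]
    grand_mean = np.concatenate(groups).mean()
    ssbn = sum(len(g) * (g.mean() - grand_mean) ** 2 for g in groups)
    sswn = sum(((g - g.mean()) ** 2).sum() for g in groups)
    k = len(groups)
    n_total = sum(len(g) for g in groups)
    f_manual = (ssbn / (k - 1)) / (sswn / (n_total - k))
    return f_manual, f_oneway(*groups).statistic  # both equal 2.4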
def alexandergovern(*args, nan_policy='propagate'):
"""Performs the Alexander Govern test.
The Alexander-Govern approximation tests the equality of k independent
means in the face of heterogeneity of variance. The test is applied to
samples from two or more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two samples.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The computed A statistic of the test.
pvalue : float
The associated p-value from the chi-squared distribution.
Warns
-----
AlexanderGovernConstantInputWarning
Raised if an input is a constant array. The statistic is not defined
in this case, so ``np.nan`` is returned.
See Also
--------
f_oneway : one-way ANOVA
Notes
-----
The use of this test relies on several assumptions.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. Unlike `f_oneway`, this test does not assume homoscedasticity,
instead relaxing the assumption of equal variances.
Input samples must be finite, one dimensional, and have size greater than
one.
References
----------
.. [1] Alexander, Ralph A., and Diane M. Govern. "A New and Simpler
Approximation for ANOVA under Variance Heterogeneity." Journal
of Educational Statistics, vol. 19, no. 2, 1994, pp. 91-101.
JSTOR, www.jstor.org/stable/1165140. Accessed 12 Sept. 2020.
Examples
--------
>>> from scipy.stats import alexandergovern
Here are some data on annual percentage rate of interest charged on
new car loans at nine of the largest banks in four American cities
taken from the National Institute of Standards and Technology's
ANOVA dataset.
We use `alexandergovern` to test the null hypothesis that all cities
have the same mean APR against the alternative that the cities do not
all have the same mean APR. We decide that a significance level of 5%
is required to reject the null hypothesis in favor of the alternative.
>>> atlanta = [13.75, 13.75, 13.5, 13.5, 13.0, 13.0, 13.0, 12.75, 12.5]
>>> chicago = [14.25, 13.0, 12.75, 12.5, 12.5, 12.4, 12.3, 11.9, 11.9]
>>> houston = [14.0, 14.0, 13.51, 13.5, 13.5, 13.25, 13.0, 12.5, 12.5]
>>> memphis = [15.0, 14.0, 13.75, 13.59, 13.25, 12.97, 12.5, 12.25,
... 11.89]
>>> alexandergovern(atlanta, chicago, houston, memphis)
AlexanderGovernResult(statistic=4.65087071883494,
pvalue=0.19922132490385214)
The p-value is 0.1992, indicating a nearly 20% chance of observing
such an extreme value of the test statistic under the null hypothesis.
This exceeds 5%, so we do not reject the null hypothesis in favor of
the alternative.
"""
args = _alexandergovern_input_validation(args, nan_policy)
if np.any([(arg == arg[0]).all() for arg in args]):
warnings.warn(AlexanderGovernConstantInputWarning())
return AlexanderGovernResult(np.nan, np.nan)
# The following formula numbers reference the equation described on
# page 92 by Alexander, Govern. Formulas 5, 6, and 7 describe other
# tests that serve as the basis for equation (8) but are not needed
# to perform the test.
# precalculate mean and length of each sample
lengths = np.array([ma.count(arg) if nan_policy == 'omit' else len(arg)
for arg in args])
means = np.array([np.mean(arg) for arg in args])
# (1) determine standard error of the mean for each sample
standard_errors = [np.std(arg, ddof=1) / np.sqrt(length)
for arg, length in zip(args, lengths)]
# (2) define a weight for each sample
inv_sq_se = 1 / np.square(standard_errors)
weights = inv_sq_se / np.sum(inv_sq_se)
# (3) determine variance-weighted estimate of the common mean
var_w = np.sum(weights * means)
# (4) determine one-sample t statistic for each group
t_stats = (means - var_w)/standard_errors
# calculate parameters to be used in transformation
v = lengths - 1
a = v - .5
b = 48 * a**2
c = (a * np.log(1 + (t_stats ** 2)/v))**.5
# (8) perform a normalizing transformation on t statistic
z = (c + ((c**3 + 3*c)/b) -
((4*c**7 + 33*c**5 + 240*c**3 + 855*c) /
(b**2*10 + 8*b*c**4 + 1000*b)))
# (9) calculate statistic
A = np.sum(np.square(z))
# "[the p value is determined from] central chi-square random deviates
# with k - 1 degrees of freedom". Alexander, Govern (94)
p = distributions.chi2.sf(A, len(args) - 1)
return AlexanderGovernResult(A, p)
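# Illustrative sketch (editorial addition, not part of the public API): the
# returned p-value is the chi-squared survival function of the statistic with
# k - 1 degrees of freedom, exactly as computed at the end of
# `alexandergovern` above; `distributions` is the module-level import already
# used there.  The helper is hypothetical and never called from this module.
def _alexandergovern_pvalue_demo():
    a = [13.75, 13.75, 13.5, 13.5, 13.0, 13.0, 13.0, 12.75, 12.5]
    b = [14.25, 13.0, 12.75, 12.5, 12.5, 12.4, 12.3, 11.9, 11.9]
    res = alexandergovern(a, b)
    return res.pvalue, distributions.chi2.sf(res.statistic, 1)  # identical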
def _alexandergovern_input_validation(args, nan_policy):
if len(args) < 2:
raise TypeError(f"2 or more inputs required, got {len(args)}")
# input arrays are flattened
args = [np.asarray(arg, dtype=float) for arg in args]
for i, arg in enumerate(args):
if np.size(arg) <= 1:
raise ValueError("Input sample size must be greater than one.")
if arg.ndim != 1:
raise ValueError("Input samples must be one-dimensional")
if np.isinf(arg).any():
raise ValueError("Input samples must be finite.")
contains_nan, nan_policy = _contains_nan(arg, nan_policy=nan_policy)
if contains_nan and nan_policy == 'omit':
args[i] = ma.masked_invalid(arg)
return args
AlexanderGovernResult = make_dataclass("AlexanderGovernResult", ("statistic",
"pvalue"))
class AlexanderGovernConstantInputWarning(RuntimeWarning):
"""Warning generated by `alexandergovern` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the statistic is not defined.")
self.args = (msg,)
class PearsonRConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficient "
"is not defined.")
self.args = (msg,)
class PearsonRNearConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is nearly constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is nearly constant; the computed "
"correlation coefficient may be inaccurate.")
self.args = (msg,)
def pearsonr(x, y):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
Returns
-------
r : float
Pearson's correlation coefficient.
p-value : float
Two-tailed p-value.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector x and :math:`m_y` is
the mean of the vector y.
Under the assumption that x and y are drawn from
independent normal distributions (so the population correlation coefficient
is 0), the probability density function of the sample correlation
coefficient r is ([1]_, [2]_):
.. math::
f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. The p-value
roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. More precisely, for a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
There is a linear dependence between x and y if y = a + b*x + e, where
a,b are constants and e is a random error term, assumed to be independent
of x. For simplicity, assume that x is standard normal, a=0, b=1 and let
e follow a normal distribution with mean zero and standard deviation s>0.
>>> s = 0.5
>>> x = stats.norm.rvs(size=500)
>>> e = stats.norm.rvs(scale=s, size=500)
>>> y = x + e
>>> stats.pearsonr(x, y)
(0.9029601878969703, 8.428978827629898e-185) # may vary
This should be close to the exact value given by
>>> 1/np.sqrt(1 + s**2)
0.8944271909999159
For s=0.5, we observe a high level of correlation. In general, a large
variance of the noise reduces the correlation, while the correlation
approaches one as the variance of the error goes to zero.
It is important to keep in mind that no correlation does not imply
independence unless (x, y) is jointly normal. Correlation can even be zero
when there is a very simple dependence structure: if X follows a
standard normal distribution, let y = abs(x). Note that the correlation
between x and y is zero. Indeed, since the expectation of x is zero,
cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero
by symmetry. The following lines of code illustrate this observation:
>>> y = np.abs(x)
>>> stats.pearsonr(x, y)
(-0.016172891856853524, 0.7182823678751942) # may vary
A non-zero correlation coefficient can be misleading. For example, if X has
a standard normal distribution, define y = x if x < 0 and y = 0 otherwise.
A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797...,
implying a high level of correlation:
>>> y = np.where(x < 0, x, 0)
>>> stats.pearsonr(x, y)
(0.8537091583771509, 3.183461621422181e-143) # may vary
This is counterintuitive, since there is no dependence of x and y if x is
larger than zero, which happens in about half of the cases if we sample
x and y.
"""
n = len(x)
if n != len(y):
raise ValueError('x and y must have the same length.')
if n < 2:
raise ValueError('x and y must have length at least 2.')
x = np.asarray(x)
y = np.asarray(y)
# If an input is constant, the correlation coefficient is not defined.
if (x == x[0]).all() or (y == y[0]).all():
warnings.warn(PearsonRConstantInputWarning())
return np.nan, np.nan
# dtype is the data type for the calculations. This expression ensures
# that the data type is at least 64 bit floating point. It might have
# more precision if the input is, for example, np.longdouble.
dtype = type(1.0 + x[0] + y[0])
if n == 2:
return dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0])), 1.0
xmean = x.mean(dtype=dtype)
ymean = y.mean(dtype=dtype)
# By using `astype(dtype)`, we ensure that the intermediate calculations
# use at least 64 bit floating point.
xm = x.astype(dtype) - xmean
ym = y.astype(dtype) - ymean
# Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = linalg.norm(xm)
normym = linalg.norm(ym)
threshold = 1e-13
if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
r = np.dot(xm/normxm, ym/normym)
# Presumably, if abs(r) > 1, then it is only some small artifact of
# floating point arithmetic.
r = max(min(r, 1.0), -1.0)
# As explained in the docstring, the p-value can be computed as
# p = 2*dist.cdf(-abs(r))
# where dist is the beta distribution on [-1, 1] with shape parameters
# a = b = n/2 - 1. `special.btdtr` is the CDF for the beta distribution
# on [0, 1]. To use it, we make the transformation x = (r + 1)/2; the
# shape parameters do not change. Then -abs(r) used in `cdf(-abs(r))`
# becomes x = (-abs(r) + 1)/2 = 0.5*(1 - abs(r)). (r is cast to float64
# to avoid a TypeError raised by btdtr when r is higher precision.)
ab = n/2 - 1
prob = 2*special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
return r, prob
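# Illustrative sketch (editorial addition, not part of the public API): the
# p-value relation described in the docstring, p = 2*dist.cdf(-abs(r)) with
# `dist` a beta distribution on [-1, 1], can be reproduced through the
# module-level `distributions` import.  The helper is hypothetical and never
# called from this module.
def _pearsonr_pvalue_demo():
    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    y = np.array([10.0, 9.0, 2.5, 6.0, 4.0])
    r, p = pearsonr(x, y)
    n = len(x)
    dist = distributions.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
    return p, 2*dist.cdf(-abs(r))  # both values agree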
def fisher_exact(table, alternative='two-sided'):
"""Perform a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements must be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
See the Notes for more details.
Returns
-------
oddsratio : float
This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table. This can be used as an alternative to
`fisher_exact` when the numbers in the table are large.
barnard_exact : Barnard's exact test, which is a more powerful alternative
to Fisher's exact test for 2x2 contingency tables.
boschloo_exact : Boschloo's exact test, which is a more powerful alternative
to Fisher's exact test for 2x2 contingency tables.
Notes
-----
*Null hypothesis and p-values*
The null hypothesis is that the input table is from the hypergeometric
distribution with parameters (as used in `hypergeom`)
``M = a + b + c + d``, ``n = a + b`` and ``N = a + c``, where the
input table is ``[[a, b], [c, d]]``. This distribution has support
``max(0, N + n - M) <= x <= min(N, n)``, or, in terms of the values
in the input table, ``min(0, a - d) <= x <= a + min(b, c)``. ``x``
can be interpreted as the upper-left element of a 2x2 table, so the
tables in the distribution have form::
[ x n - x ]
[N - x M - (n + N) + x]
For example, if::
table = [6 2]
[1 4]
then the support is ``2 <= x <= 7``, and the tables in the distribution
are::
[2 6] [3 5] [4 4] [5 3] [6 2] [7 1]
[5 0] [4 1] [3 2] [2 3] [1 4] [0 5]
The probability of each table is given by the hypergeometric distribution
``hypergeom.pmf(x, M, n, N)``. For this example, these are (rounded to
three significant digits)::
x 2 3 4 5 6 7
p 0.0163 0.163 0.408 0.326 0.0816 0.00466
These can be computed with::
>>> from scipy.stats import hypergeom
>>> table = np.array([[6, 2], [1, 4]])
>>> M = table.sum()
>>> n = table[0].sum()
>>> N = table[:, 0].sum()
>>> start, end = hypergeom.support(M, n, N)
>>> hypergeom.pmf(np.arange(start, end+1), M, n, N)
array([0.01631702, 0.16317016, 0.40792541, 0.32634033, 0.08158508,
0.004662 ])
The two-sided p-value is the probability that, under the null hypothesis,
a random table would have a probability equal to or less than the
probability of the input table. For our example, the probability of
the input table (where ``x = 6``) is 0.0816. The x values where the
probability does not exceed this are 2, 6 and 7, so the two-sided p-value
is ``0.0163 + 0.0816 + 0.00466 ~= 0.10256``::
>>> from scipy.stats import fisher_exact
>>> oddsr, p = fisher_exact(table, alternative='two-sided')
>>> p
0.10256410256410257
The one-sided p-value for ``alternative='greater'`` is the probability
that a random table has ``x >= a``, which in our example is ``x >= 6``,
or ``0.0816 + 0.00466 ~= 0.08626``::
>>> oddsr, p = fisher_exact(table, alternative='greater')
>>> p
0.08624708624708627
This is equivalent to computing the survival function of the
distribution at ``x = 5`` (one less than ``x`` from the input table,
because we want to include the probability of ``x = 6`` in the sum)::
>>> hypergeom.sf(5, M, n, N)
0.08624708624708627
For ``alternative='less'``, the one-sided p-value is the probability
that a random table has ``x <= a``, (i.e. ``x <= 6`` in our example),
or ``0.0163 + 0.163 + 0.408 + 0.326 + 0.0816 ~= 0.9949``::
>>> oddsr, p = fisher_exact(table, alternative='less')
>>> p
0.9953379953379957
This is equivalent to computing the cumulative distribution function
of the distribution at ``x = 6``:
>>> hypergeom.cdf(6, M, n, N)
0.9953379953379957
*Odds ratio*
The calculated odds ratio is different from the one R uses. This SciPy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> from scipy.stats import fisher_exact
>>> oddsratio, pvalue = fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
# int32 is not enough for the algorithm
c = np.asarray(table, dtype=np.int64)
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1, 0] > 0 and c[0, 1] > 0:
oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
else:
oddsratio = np.inf
n1 = c[0, 0] + c[0, 1]
n2 = c[1, 0] + c[1, 1]
n = c[0, 0] + c[1, 0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin halves in two-sided test."""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and \
hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and \
hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
elif alternative == 'two-sided':
mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0, 0] < mode:
plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
pvalue = min(pvalue, 1.0)
return oddsratio, pvalue
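# Illustrative sketch (editorial addition, not part of the public API): as
# the docstring notes, the one-sided 'greater' p-value equals the
# hypergeometric survival function evaluated one step below the observed
# upper-left cell.  The helper is hypothetical and never called from this
# module.
def _fisher_exact_greater_demo():
    table = np.array([[6, 2], [1, 4]])
    _, p_greater = fisher_exact(table, alternative='greater')
    M, n, N = table.sum(), table[0].sum(), table[:, 0].sum()
    return p_greater, distributions.hypergeom.sf(table[0, 0] - 1, M, n, N)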
class SpearmanRConstantInputWarning(RuntimeWarning):
"""Warning generated by `spearmanr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficient "
"is not defined.")
self.args = (msg,)
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate',
alternative='two-sided'):
"""Calculate a Spearman correlation coefficient with associated p-value.
The Spearman rank-order correlation coefficient is a nonparametric measure
of the monotonicity of the relationship between two datasets. Unlike the
Pearson correlation, the Spearman correlation does not assume that both
datasets are normally distributed. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Correlations of -1 or +1 imply an exact monotonic relationship. Positive
correlations imply that as x increases, so does y. Negative correlations
imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the correlation is nonzero
* 'less': the correlation is negative (less than zero)
* 'greater': the correlation is positive (greater than zero)
.. versionadded:: 1.7.0
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
variables are given as parameters). The correlation matrix is square
with length equal to the total number of variables (columns or rows)
in ``a`` and ``b`` combined.
pvalue : float
The p-value for a hypothesis test whose null hypothesis
is that two sets of data are uncorrelated. See `alternative` above
for alternative hypotheses. `pvalue` has the same
shape as `correlation`.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
SpearmanrResult(correlation=0.82078..., pvalue=0.08858...)
>>> rng = np.random.default_rng()
>>> x2n = rng.standard_normal((100, 2))
>>> y2n = rng.standard_normal((100, 2))
>>> stats.spearmanr(x2n)
SpearmanrResult(correlation=-0.07960396039603959, pvalue=0.4311168705769747)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
SpearmanrResult(correlation=-0.07960396039603959, pvalue=0.4311168705769747)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , -0.07960396, -0.08314431, 0.09662166],
[-0.07960396, 1. , -0.14448245, 0.16738074],
[-0.08314431, -0.14448245, 1. , 0.03234323],
[ 0.09662166, 0.16738074, 0.03234323, 1. ]])
>>> pval
array([[0. , 0.43111687, 0.41084066, 0.33891628],
[0.43111687, 0. , 0.15151618, 0.09600687],
[0.41084066, 0.15151618, 0. , 0.74938561],
[0.33891628, 0.09600687, 0.74938561, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , -0.07960396, -0.08314431, 0.09662166],
[-0.07960396, 1. , -0.14448245, 0.16738074],
[-0.08314431, -0.14448245, 1. , 0.03234323],
[ 0.09662166, 0.16738074, 0.03234323, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
SpearmanrResult(correlation=0.044981624540613524, pvalue=0.5270803651336189)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
SpearmanrResult(correlation=0.044981624540613524, pvalue=0.5270803651336189)
>>> rng = np.random.default_rng()
>>> xint = rng.integers(10, size=(100, 2))
>>> stats.spearmanr(xint)
SpearmanrResult(correlation=0.09800224850707953, pvalue=0.3320271757932076)
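A one-sided alternative hypothesis can be selected through the
``alternative`` keyword; the call below is only a sketch reusing the first
example, with output omitted:
>>> stats.spearmanr([1, 2, 3, 4, 5], [5, 6, 7, 8, 7],
...                 alternative='greater')  # doctest: +SKIP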
"""
if axis is not None and axis > 1:
raise ValueError("spearmanr only handles 1-D or 2-D arrays, "
"supplied axis argument {}, please use only "
"values 0, 1 or None for axis".format(axis))
a, axisout = _chk_asarray(a, axis)
if a.ndim > 2:
raise ValueError("spearmanr only handles 1-D or 2-D arrays")
if b is None:
if a.ndim < 2:
raise ValueError("`spearmanr` needs at least 2 "
"variables to compare")
else:
# Concatenate a and b, so that we now only have to handle the case
# of a 2-D `a`.
b, _ = _chk_asarray(b, axis)
if axisout == 0:
a = np.column_stack((a, b))
else:
a = np.row_stack((a, b))
n_vars = a.shape[1 - axisout]
n_obs = a.shape[axisout]
if n_obs <= 1:
# Handle empty arrays or single observations.
return SpearmanrResult(np.nan, np.nan)
if axisout == 0:
if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all():
# If an input is constant, the correlation coefficient
# is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
else: # case when axisout == 1 b/c a is 2 dim only
if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all():
# If an input is constant, the correlation coefficient
# is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
variable_has_nan = np.zeros(n_vars, dtype=bool)
if a_contains_nan:
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy,
alternative=alternative)
elif nan_policy == 'propagate':
if a.ndim == 1 or n_vars <= 2:
return SpearmanrResult(np.nan, np.nan)
else:
# Keep track of variables with NaNs, set the outputs to NaN
# only for those variables
variable_has_nan = np.isnan(a).any(axis=axisout)
a_ranked = np.apply_along_axis(rankdata, axisout, a)
rs = np.corrcoef(a_ranked, rowvar=axisout)
dof = n_obs - 2 # degrees of freedom
# rs can have elements equal to 1, so avoid zero division warnings
with np.errstate(divide='ignore'):
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
t, prob = _ttest_finish(dof, t, alternative)
# For backwards compatibility, return scalars when comparing 2 columns
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
rs[variable_has_nan, :] = np.nan
rs[:, variable_has_nan] = np.nan
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""Calculate a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value.
pvalue : float
Two-sided p-value.
Notes
-----
`pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} -
\overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
no. 3, pp. 603-607, 1954.
.. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
:doi:`10.1002/9781118445112.stat06227`
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
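As noted above, the test is equivalent to an independent two-sample t-test
between the two groups; the sketch below should reproduce the same p-value
(output omitted):
>>> stats.ttest_ind(b[a == 1], b[a == 0])  # doctest: +SKIP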
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate',
method='auto', variant='b', alternative='two-sided'):
"""Calculate Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, and values close to -1
indicate strong disagreement. This implements two variants of Kendall's
tau: tau-b (the default) and tau-c (also known as Stuart's tau-c). These
differ only in how they are normalized to lie within the range -1 to 1;
the hypothesis tests (their p-values) are identical. Kendall's original
tau-a is not implemented separately because both tau-b and tau-c reduce
to tau-a in the absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they
will be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
method : {'auto', 'asymptotic', 'exact'}, optional
Defines which method is used to calculate the p-value [5]_.
The following options are available (default is 'auto'):
* 'auto': selects the appropriate method based on a trade-off
between speed and accuracy
* 'asymptotic': uses a normal approximation valid for large samples
* 'exact': computes the exact p-value, but can only be used if no ties
are present. As the sample size increases, the 'exact' computation
time may grow and the result may lose some precision.
variant : {'b', 'c'}, optional
Defines which variant of Kendall's tau is returned. Default is 'b'.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the rank correlation is nonzero
* 'less': the rank correlation is negative (less than zero)
* 'greater': the rank correlation is positive (greater than zero)
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See Also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau_b = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
tau_c = 2 (P - Q) / (n**2 * (m - 1) / m)
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U. n is the total number of samples, and m is the
number of unique values in either `x` or `y`, whichever is smaller.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
.. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
Charles Griffin & Co., 1970.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
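Stuart's tau-c and one-sided alternatives are available through the
``variant`` and ``alternative`` keywords; these calls are only sketches,
with output omitted:
>>> stats.kendalltau(x1, x2, variant='c')  # doctest: +SKIP
>>> stats.kendalltau(x1, x2, alternative='less')  # doctest: +SKIP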
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same "
f"size, found x-size {x.size} and y-size {y.size}")
elif not x.size or not y.size:
# Return NaN if arrays are empty
return KendalltauResult(np.nan, np.nan)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
if variant == 'b':
return mstats_basic.kendalltau(x, y, method=method, use_ties=True,
alternative=alternative)
else:
message = ("nan_policy='omit' is currently compatible only with "
"variant='b'.")
raise ValueError(message)
if initial_lexsort is not None: # deprecate to drop!
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
if variant == 'b':
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
elif variant == 'c':
minclasses = min(len(set(x)), len(set(y)))
tau = 2*con_minus_dis / (size**2 * (minclasses-1)/minclasses)
else:
raise ValueError(f"Unknown variant of the method chosen: {variant}. "
"variant must be 'b' or 'c'.")
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
# The p-value calculation is the same for all variants since the p-value
# depends only on con_minus_dis.
if method == 'exact' and (xtie != 0 or ytie != 0):
raise ValueError("Ties found, exact method cannot be used.")
if method == 'auto':
if (xtie == 0 and ytie == 0) and (size <= 33 or
min(dis, tot-dis) <= 1):
method = 'exact'
else:
method = 'asymptotic'
if xtie == 0 and ytie == 0 and method == 'exact':
pvalue = mstats_basic._kendall_p_exact(size, tot-dis, alternative)
elif method == 'asymptotic':
# con_minus_dis is approx normally distributed with this variance [3]_
m = size * (size - 1.)
var = ((m * (2*size + 5) - x1 - y1) / 18 +
(2 * xtie * ytie) / m + x0 * y0 / (9 * m * (size - 2)))
z = con_minus_dis / np.sqrt(var)
_, pvalue = _normtest_finish(z, alternative)
else:
raise ValueError(f"Unknown method {method} specified. Use 'auto', "
"'exact' or 'asymptotic'.")
return KendalltauResult(tau, pvalue)
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))
def weightedtau(x, y, rank=True, weigher=None, additive=True):
r"""Compute a weighted version of Kendall's :math:`\tau`.
The weighted :math:`\tau` is a weighted version of Kendall's
:math:`\tau` in which exchanges of high weight are more influential than
exchanges of low weight. The default parameters compute the additive
hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
been shown to provide the best balance between important and
unimportant elements [1]_.
The weighting is defined by means of a rank array, which assigns a
nonnegative rank to each element (higher importance ranks being
associated with smaller values, e.g., 0 is the highest possible rank),
and a weigher function, which assigns a weight based on the rank to
each element. The weight of an exchange is then the sum or the product
of the weights of the ranks of the exchanged elements. The default
parameters compute :math:`\tau_\mathrm h`: an exchange between
elements with rank :math:`r` and :math:`s` (starting from zero) has
weight :math:`1/(r+1) + 1/(s+1)`.
Specifying a rank array is meaningful only if you have in mind an
external criterion of importance. If, as is usually the case, you do
not have in mind a specific rank, the weighted :math:`\tau` is
defined by averaging the values obtained using the decreasing
lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
behavior with default parameters. Note that the convention used
here for ranking (lower values imply higher importance) is opposite
to that used by other SciPy statistical functions.
Parameters
----------
x, y : array_like
Arrays of scores, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
rank : array_like of ints or bool, optional
A nonnegative rank assigned to each element. If it is None, the
decreasing lexicographical rank by (`x`, `y`) will be used: elements of
higher rank will be those with larger `x`-values, using `y`-values to
break ties (in particular, swapping `x` and `y` will give a different
result). If it is False, the element indices will be used
directly as ranks. The default is True, in which case this
function returns the average of the values obtained using the
decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
weigher : callable, optional
The weigher function. Must map nonnegative integers (zero
representing the most important element) to a nonnegative weight.
The default, None, provides hyperbolic weighting, that is,
rank :math:`r` is mapped to weight :math:`1/(r+1)`.
additive : bool, optional
If True, the weight of an exchange is computed by adding the
weights of the ranks of the exchanged elements; otherwise, the weights
are multiplied. The default is True.
Returns
-------
correlation : float
The weighted :math:`\tau` correlation index.
pvalue : float
Presently ``np.nan``, as the null distribution of the statistic is unknown (even in the
additive hyperbolic case).
See Also
--------
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
This function uses an :math:`O(n \log n)`, mergesort-based algorithm
[1]_ that is a weighted extension of Knight's algorithm for Kendall's
:math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
between rankings without ties (i.e., permutations) by setting
`additive` and `rank` to False, as the definition given in [1]_ is a
generalization of Shieh's.
NaNs are considered the smallest possible score.
.. versionadded:: 0.19.0
References
----------
.. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
ties", Proceedings of the 24th international conference on World
Wide Web, pp. 1166-1176, ACM, 2015.
.. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association,
Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
.. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
Examples
--------
>>> from scipy import stats
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
>>> p_value
nan
>>> tau, p_value = stats.weightedtau(x, y, additive=False)
>>> tau
-0.62205716951801038
NaNs are considered the smallest possible score:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, np.nan]
>>> tau, _ = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
This is exactly Kendall's tau:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
>>> tau
-0.47140452079103173
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> stats.weightedtau(x, y, rank=None)
WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
>>> stats.weightedtau(y, x, rank=None)
WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan)
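An explicit rank array can also be supplied when an external notion of
importance is available; the ranks below are arbitrary and only meant as a
sketch (output omitted):
>>> stats.weightedtau(x, y, rank=[2, 4, 3, 1, 0])  # doctest: +SKIP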
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `weightedtau` must be "
"of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
if not x.size:
# Return NaN if arrays are empty
return WeightedTauResult(np.nan, np.nan)
# If there are NaNs we apply _toint64()
if np.isnan(np.sum(x)):
x = _toint64(x)
if np.isnan(np.sum(y)):
y = _toint64(y)
# Reduce unsupported types to ranks
if x.dtype != y.dtype:
if x.dtype != np.int64:
x = _toint64(x)
if y.dtype != np.int64:
y = _toint64(y)
else:
if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
x = _toint64(x)
y = _toint64(y)
if rank is True:
return WeightedTauResult((
_weightedrankedtau(x, y, None, weigher, additive) +
_weightedrankedtau(y, x, None, weigher, additive)
) / 2, np.nan)
if rank is False:
rank = np.arange(x.size, dtype=np.intp)
elif rank is not None:
rank = np.asarray(rank).ravel()
if rank.size != x.size:
raise ValueError(
"All inputs to `weightedtau` must be of the same size, "
"found x-size %s and rank-size %s" % (x.size, rank.size)
)
return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive),
np.nan)
# FROM MGCPY: https://github.com/neurodata/mgcpy
class _ParallelP:
"""Helper function to calculate parallel p-value."""
def __init__(self, x, y, random_states):
self.x = x
self.y = y
self.random_states = random_states
def __call__(self, index):
order = self.random_states[index].permutation(self.y.shape[0])
permy = self.y[order][:, order]
# calculate permuted stats, store in null distribution
perm_stat = _mgc_stat(self.x, permy)[0]
return perm_stat
def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):
r"""Helper function that calculates the p-value. See below for uses.
Parameters
----------
x, y : ndarray
`x` and `y` have shapes `(n, p)` and `(n, q)`.
stat : float
The sample test statistic.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is 1000 replications.
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel (uses
`multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores
available to the Process. Alternatively supply a map-like callable,
such as `multiprocessing.Pool.map` for evaluating the population in
parallel. This evaluation is carried out as `workers(func, iterable)`.
Requires that `func` be pickleable.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
pvalue : float
The sample test p-value.
null_dist : list
The approximated null distribution.
"""
# generate seeds for each rep (change to new parallel random number
# capabilities in numpy >= 1.17+)
random_state = check_random_state(random_state)
random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32,
size=4, dtype=np.uint32)) for _ in range(reps)]
# parallelizes with specified workers over number of reps and set seeds
parallelp = _ParallelP(x=x, y=y, random_states=random_states)
with MapWrapper(workers) as mapwrapper:
null_dist = np.array(list(mapwrapper(parallelp, range(reps))))
# calculate p-value and significant permutation map through list
pvalue = (null_dist >= stat).sum() / reps
# correct for a p-value of 0. This is because, with bootstrapping
# permutations, a p-value of 0 is incorrect
if pvalue == 0:
pvalue = 1 / reps
return pvalue, null_dist
def _euclidean_dist(x):
return cdist(x, x)
MGCResult = namedtuple('MGCResult', ('stat', 'pvalue', 'mgc_dict'))
def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
workers=1, is_twosamp=False, random_state=None):
r"""Computes the Multiscale Graph Correlation (MGC) test statistic.
Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for
one property (e.g. cloud density), and the :math:`l`-nearest neighbors for
the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is
called the "scale". A priori, however, it is not know which scales will be
most informative. So, MGC computes all distance pairs, and then efficiently
computes the distance correlations for all scales. The local correlations
illustrate which scales are relatively informative about the relationship.
The key, therefore, to successfully discover and decipher relationships
between disparate data modalities is to adaptively determine which scales
are the most informative, and the geometric implication for the most
informative scales. Doing so not only provides an estimate of whether the
modalities are related, but also provides insight into how the
determination was made. This is especially important in high-dimensional
data, where simple visualizations do not reveal relationships to the
unaided human eye. Characterizations of this implementation in particular
have been derived from and benchmarked in [2]_.
Parameters
----------
x, y : ndarray
If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is
the number of samples and `p` and `q` are the number of dimensions,
then the MGC independence test will be run. Alternatively, ``x`` and
``y`` can have shapes ``(n, n)`` if they are distance or similarity
matrices, and ``compute_distance`` must be set to ``None``. If ``x``
and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired
two-sample MGC test will be run.
compute_distance : callable, optional
A function that computes the distance or similarity among the samples
within each data matrix. Set to ``None`` if ``x`` and ``y`` are
already distance matrices. The default uses the euclidean norm metric.
If you are calling a custom function, either create the distance
matrix before-hand or create a function of the form
``compute_distance(x)`` where `x` is the data matrix for which
pairwise distances are calculated.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is ``1000``.
workers : int or map-like callable, optional
If ``workers`` is an int the population is subdivided into ``workers``
sections and evaluated in parallel (uses ``multiprocessing.Pool
<multiprocessing>``). Supply ``-1`` to use all cores available to the
Process. Alternatively supply a map-like callable, such as
``multiprocessing.Pool.map`` for evaluating the p-value in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
Requires that `func` be pickleable. The default is ``1``.
is_twosamp : bool, optional
If `True`, a two sample test will be run. If ``x`` and ``y`` have
shapes ``(n, p)`` and ``(m, p)``, this option will be overridden and
set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes
``(n, p)`` and a two sample test is desired. The default is ``False``.
Note that this will not run if inputs are distance matrices.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
pvalue : float
The p-value obtained via permutation.
mgc_dict : dict
Contains additional useful returns containing the following
keys:
- mgc_map : ndarray
A 2D representation of the latent geometry of the relationship.
- opt_scale : (int, int)
The estimated optimal scale as a `(x, y)` pair.
- null_dist : list
The null distribution derived from the permuted matrices
See Also
--------
pearsonr : Pearson correlation coefficient and p-value for testing
non-correlation.
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
Notes
-----
A description of the process of MGC and applications on neuroscience data
can be found in [1]_. It is performed using the following steps:
#. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and
modified to be mean zero columnwise. This results in two
:math:`n \times n` distance matrices :math:`A` and :math:`B` (the
centering and unbiased modification) [3]_.
#. For all values :math:`k` and :math:`l` from :math:`1, ..., n`,
* The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs
are calculated for each property. Here, :math:`G_k (i, j)` indicates
the :math:`k`-smallest values of the :math:`i`-th row of :math:`A`
and :math:`H_l (i, j)` indicates the :math:`l`-smallest values of
the :math:`i`-th row of :math:`B`
* Let :math:`\circ` denote the entry-wise matrix product; then local
correlations are summed and normalized using the following statistic:
.. math::
c^{kl} = \frac{\sum_{ij} A G_k B H_l}
{\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}}
#. The MGC test statistic is the smoothed optimal local correlation of
:math:`\{ c^{kl} \}`. Denote the smoothing operation as :math:`R(\cdot)`
(which essentially sets all isolated large correlations to 0 and leaves
connected large correlations unchanged; see [3]_). MGC is,
.. math::
MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right)
\right)
The test statistic returns a value between :math:`(-1, 1)` since it is
normalized.
The p-value returned is calculated using a permutation test. This process
is completed by first randomly permuting :math:`y` to estimate the null
distribution and then calculating the probability of observing a test
statistic, under the null, at least as extreme as the observed test
statistic.
MGC requires at least 5 samples to run with reliable results. It can also
handle high-dimensional data sets.
In addition, by manipulating the input data matrices, the two-sample
testing problem can be reduced to the independence testing problem [4]_.
Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n` and
:math:`p \times m`, data matrices :math:`X` and :math:`Y` can be created as
follows:
.. math::
X = [U | V] \in \mathcal{R}^{p \times (n + m)}
Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)}
Then, the MGC statistic can be calculated as normal. This methodology can
be extended to similar tests such as distance correlation [4]_.
.. versionadded:: 1.4.0
References
----------
.. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
Maggioni, M., & Shen, C. (2019). Discovering and deciphering
relationships across disparate data modalities. ELife.
.. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019).
mgcpy: A Comprehensive High Dimensional Independence Testing Python
Package. :arXiv:`1907.02088`
.. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance
correlation to multiscale graph correlation. Journal of the American
Statistical Association.
.. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
Distance and Kernel Methods for Hypothesis Testing.
:arXiv:`1806.05514`
Examples
--------
>>> from scipy.stats import multiscale_graphcorr
>>> x = np.arange(100)
>>> y = x
>>> stat, pvalue, _ = multiscale_graphcorr(x, y, workers=-1)
>>> '%.1f, %.3f' % (stat, pvalue)
'1.0, 0.001'
Alternatively,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y)
>>> '%.1f, %.3f' % (mgc.stat, mgc.pvalue)
'1.0, 0.001'
To run an unpaired two-sample test,
>>> x = np.arange(100)
>>> y = np.arange(79)
>>> mgc = multiscale_graphcorr(x, y)
>>> '%.3f, %.2f' % (mgc.stat, mgc.pvalue) # doctest: +SKIP
'0.033, 0.02'
or, if shape of the inputs are the same,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y, is_twosamp=True)
>>> '%.3f, %.1f' % (mgc.stat, mgc.pvalue) # doctest: +SKIP
'-0.008, 1.0'
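Precomputed distance matrices can be supplied by setting
``compute_distance`` to ``None``; a minimal sketch, with output omitted:
>>> from scipy.spatial.distance import cdist
>>> x = np.arange(100, dtype=float).reshape(-1, 1)
>>> dx = cdist(x, x)
>>> mgc = multiscale_graphcorr(dx, dx, compute_distance=None)  # doctest: +SKIP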
"""
if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
raise ValueError("x and y must be ndarrays")
# convert arrays of type (n,) to (n, 1)
if x.ndim == 1:
x = x[:, np.newaxis]
elif x.ndim != 2:
raise ValueError("Expected a 2-D array `x`, found shape "
"{}".format(x.shape))
if y.ndim == 1:
y = y[:, np.newaxis]
elif y.ndim != 2:
raise ValueError("Expected a 2-D array `y`, found shape "
"{}".format(y.shape))
nx, px = x.shape
ny, py = y.shape
# check for NaNs
_contains_nan(x, nan_policy='raise')
_contains_nan(y, nan_policy='raise')
# check for positive or negative infinity and raise error
if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
raise ValueError("Inputs contain infinities")
if nx != ny:
if px == py:
# reshape x and y for two sample testing
is_twosamp = True
else:
raise ValueError("Shape mismatch, x and y must have shape [n, p] "
"and [n, q] or have shape [n, p] and [m, p].")
if nx < 5 or ny < 5:
raise ValueError("MGC requires at least 5 samples to give reasonable "
"results.")
# convert x and y to float
x = x.astype(np.float64)
y = y.astype(np.float64)
# check that compute_distance is a callable (or None)
if not callable(compute_distance) and compute_distance is not None:
raise ValueError("Compute_distance must be a function.")
# check that the number of reps is a non-negative integer (a warning is
# raised if it is under 1000)
if not isinstance(reps, int) or reps < 0:
raise ValueError("Number of reps must be an integer greater than 0.")
elif reps < 1000:
msg = ("The number of replications is low (under 1000), and p-value "
"calculations may be unreliable. Use the p-value result, with "
"caution!")
warnings.warn(msg, RuntimeWarning)
if is_twosamp:
if compute_distance is None:
raise ValueError("Cannot run if inputs are distance matrices")
x, y = _two_sample_transform(x, y)
if compute_distance is not None:
# compute distance matrices for x and y
x = compute_distance(x)
y = compute_distance(y)
# calculate MGC stat
stat, stat_dict = _mgc_stat(x, y)
stat_mgc_map = stat_dict["stat_mgc_map"]
opt_scale = stat_dict["opt_scale"]
# calculate permutation MGC p-value
pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers,
random_state=random_state)
# save all stats (other than stat/p-value) in dictionary
mgc_dict = {"mgc_map": stat_mgc_map,
"opt_scale": opt_scale,
"null_dist": null_dist}
return MGCResult(stat, pvalue, mgc_dict)
def _mgc_stat(distx, disty):
r"""Helper function that calculates the MGC stat. See above for use.
Parameters
----------
distx, disty : ndarray
`distx` and `disty` have shapes `(n, p)` and `(n, q)` or
`(n, n)` and `(n, n)`
if distance matrices.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
stat_dict : dict
Contains additional useful returns containing the following
keys:
- stat_mgc_map : ndarray
MGC-map of the statistics.
- opt_scale : (float, float)
The estimated optimal scale as a `(x, y)` pair.
"""
# calculate MGC map and optimal scale
stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
n, m = stat_mgc_map.shape
if m == 1 or n == 1:
# the global-scale statistic is the one calculated at the maximal nearest
# neighbors. There is not enough local scale to search over, so
# default to global scale
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = m * n
else:
samp_size = len(distx) - 1
# threshold to find connected region of significant local correlations
sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
# maximum within the significant region
stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
stat_dict = {"stat_mgc_map": stat_mgc_map,
"opt_scale": opt_scale}
return stat, stat_dict
def _threshold_mgc_map(stat_mgc_map, samp_size):
r"""
Finds a connected region of significance in the MGC-map by thresholding.
Parameters
----------
stat_mgc_map : ndarray
All local correlations within `[-1,1]`.
samp_size : int
The sample size of original data.
Returns
-------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
"""
m, n = stat_mgc_map.shape
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance. Threshold is based on a beta
# approximation.
per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
# the global-scale statistic is the one calculated at the maximal nearest
# neighbors. Threshold is the maximum on the global and local scales
threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
# find the largest connected component of significant correlations
sig_connect = stat_mgc_map > threshold
if np.sum(sig_connect) > 0:
sig_connect, _ = _measurements.label(sig_connect)
_, label_counts = np.unique(sig_connect, return_counts=True)
# skip the first element in label_counts, as it is count(zeros)
max_label = np.argmax(label_counts[1:]) + 1
sig_connect = sig_connect == max_label
else:
sig_connect = np.array([[False]])
return sig_connect
def _smooth_mgc_map(sig_connect, stat_mgc_map):
"""Finds the smoothed maximal within the significant region R.
If the area of R is too small, it returns the last local correlation. Otherwise,
returns the maximum within significant_connected_region.
Parameters
----------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
stat_mgc_map : ndarray
All local correlations within `[-1, 1]`.
Returns
-------
stat : float
The sample MGC statistic within `[-1, 1]`.
opt_scale: (float, float)
The estimated optimal scale as an `(x, y)` pair.
"""
m, n = stat_mgc_map.shape
# the global-scale statistic is the one calculated at the maximal nearest
# neighbors. By default, statistic and optimal scale are global.
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = [m, n]
if np.linalg.norm(sig_connect) != 0:
# proceed only when the connected region's area is sufficiently large
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance
if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
max_corr = max(stat_mgc_map[sig_connect])
# find all scales within significant_connected_region that maximize
# the local correlation
max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
if max_corr >= stat:
stat = max_corr
k, l = max_corr_index
one_d_indices = k * n + l # 2D to 1D indexing
k = np.max(one_d_indices) // n
l = np.max(one_d_indices) % n
opt_scale = [k+1, l+1] # adding 1s to match R indexing
return stat, opt_scale
def _two_sample_transform(u, v):
"""Helper function that concatenates x and y for two sample MGC stat.
See above for use.
Parameters
----------
u, v : ndarray
`u` and `v` have shapes `(n, p)` and `(m, p)`.
Returns
-------
x : ndarray
Concatenate `u` and `v` along the `axis = 0`. `x` thus has shape
`(n + m, p)`.
y : ndarray
Label matrix for `x` where 0 refers to samples that come from `u` and
1 refers to samples that come from `v`. `y` thus has shape `(n + m, 1)`.
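For example (a minimal sketch):
>>> u = np.zeros((3, 2))
>>> v = np.ones((2, 2))
>>> xx, yy = _two_sample_transform(u, v)
>>> xx.shape, yy.shape
((5, 2), (5, 1))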
"""
nx = u.shape[0]
ny = v.shape[0]
x = np.concatenate([u, v], axis=0)
y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
return x, y
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
alternative="two-sided"):
"""Calculate the T-test for the mean of ONE group of scores.
This is a test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
Sample observation.
popmean : float or array_like
Expected value in null hypothesis. If array_like, then it must have the
same shape as `a` excluding the axis dimension.
axis : int or None, optional
Axis along which to compute test; default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the mean of the underlying distribution of the sample
is different than the given population mean (`popmean`)
* 'less': the mean of the underlying distribution of the sample is
less than the given population mean (`popmean`)
* 'greater': the mean of the underlying distribution of the sample is
greater than the given population mean (`popmean`)
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
Two-sided p-value.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50, 2), random_state=rng)
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs, 5.0)
Ttest_1sampResult(statistic=array([-2.09794637, -1.75977004]), pvalue=array([0.04108952, 0.08468867]))
>>> stats.ttest_1samp(rvs, 0.0)
Ttest_1sampResult(statistic=array([1.64495065, 1.62095307]), pvalue=array([0.10638103, 0.11144602]))
Examples using axis and non-scalar dimension for population mean.
>>> result = stats.ttest_1samp(rvs, [5.0, 0.0])
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs.T, [5.0, 0.0], axis=1)
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs, [[5.0], [0.0]])
>>> result.statistic
array([[-2.09794637, -1.75977004],
[ 1.64495065, 1.62095307]])
>>> result.pvalue
array([[0.04108952, 0.08468867],
[0.10638103, 0.11144602]])
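A one-sided test is obtained through the ``alternative`` keyword; a sketch
(output omitted, as `rvs` is random):
>>> stats.ttest_1samp(rvs, 0.0, alternative='greater')  # doctest: +SKIP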
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis, alternative)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t, alternative):
"""Common code between all 3 t-test functions."""
# We use ``stdtr`` directly here as it handles the case when ``nan``
# values are present in the data and masked arrays are passed
# while ``t.cdf`` emits runtime warnings. This way ``_ttest_finish``
# can be shared between the ``stats`` and ``mstats`` versions.
if alternative == 'less':
pval = special.stdtr(df, t)
elif alternative == 'greater':
pval = special.stdtr(df, -t)
elif alternative == 'two-sided':
pval = special.stdtr(df, -np.abs(t))*2
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if t.ndim == 0:
t = t[()]
if pval.ndim == 0:
pval = pval[()]
return t, pval
def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True, alternative="two-sided"):
r"""
T-test for means of two independent samples from descriptive statistics.
This is a test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2.
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions are unequal.
* 'less': the mean of the first distribution is less than the
mean of the second distribution.
* 'greater': the mean of the first distribution is greater than the
mean of the second distribution.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
The calculated t-statistics.
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows::
                     Sample   Sample
               Size   Mean   Variance
    Sample 1    13    15.0     87.5
    Sample 2    11    12.0     39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
Suppose we instead have binary data and would like to apply a t-test to
compare the proportion of 1s in two independent groups::
                      Number of    Sample     Sample
                Size    ones        Mean     Variance
    Sample 1    150      30         0.2        0.16
    Sample 2    200      45        0.225     0.174375
The sample mean :math:`\hat{p}` is the proportion of ones in the sample
and the variance for a binary observation is estimated by
:math:`\hat{p}(1-\hat{p})`.
>>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150,
... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200)
Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874)
For comparison, we could compute the t statistic and p-value using
arrays of 0s and 1s and `scipy.stat.ttest_ind`, as above.
>>> group1 = np.array([1]*30 + [0]*(150-30))
>>> group2 = np.array([1]*45 + [0]*(200-45))
>>> ttest_ind(group1, group2)
Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)
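Welch's t-test, which drops the equal-variance assumption, is selected by
passing ``equal_var=False``; a sketch, with output omitted:
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
...                      mean2=12.0, std2=np.sqrt(39.0), nobs2=11,
...                      equal_var=False)  # doctest: +SKIP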
"""
mean1 = np.asarray(mean1)
std1 = np.asarray(std1)
mean2 = np.asarray(mean2)
std2 = np.asarray(std2)
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)
return Ttest_indResult(*res)
def _ttest_nans(a, b, axis, namedtuple_type):
"""
Generate an array of `nan`, with shape determined by `a`, `b` and `axis`.
This function is used by ttest_ind and ttest_rel to create the return
value when one of the inputs has size 0.
The shapes of the arrays are determined by dropping `axis` from the
shapes of `a` and `b` and broadcasting what is left.
The return value is a named tuple of the type given in `namedtuple_type`.
Examples
--------
>>> a = np.zeros((9, 2))
>>> b = np.zeros((5, 1))
>>> _ttest_nans(a, b, 0, Ttest_indResult)
Ttest_indResult(statistic=array([nan, nan]), pvalue=array([nan, nan]))
>>> a = np.zeros((3, 0, 9))
>>> b = np.zeros((1, 10))
>>> stat, p = _ttest_nans(a, b, -1, Ttest_indResult)
>>> stat
array([], shape=(3, 0), dtype=float64)
>>> p
array([], shape=(3, 0), dtype=float64)
>>> a = np.zeros(10)
>>> b = np.zeros(7)
>>> _ttest_nans(a, b, 0, Ttest_indResult)
Ttest_indResult(statistic=nan, pvalue=nan)
"""
shp = _broadcast_shapes_with_dropped_axis(a, b, axis)
if len(shp) == 0:
t = np.nan
p = np.nan
else:
t = np.full(shp, fill_value=np.nan)
p = t.copy()
return namedtuple_type(t, p)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate',
permutations=None, random_state=None, alternative="two-sided",
trim=0):
"""
Calculate the T-test for the means of *two independent* samples of scores.
This is a test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
The 'omit' option is not currently available for permutation tests or
one-sided asymptotic tests.
permutations : non-negative int, np.inf, or None (default), optional
If 0 or None (default), use the t-distribution to calculate p-values.
Otherwise, `permutations` is the number of random permutations that
will be used to estimate p-values using a permutation test. If
`permutations` equals or exceeds the number of distinct partitions of
the pooled data, an exact test is performed instead (i.e. each
distinct partition is used exactly once). See Notes for details.
.. versionadded:: 1.7.0
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Pseudorandom number generator state used to generate permutations
(used only when `permutations` is not None).
.. versionadded:: 1.7.0
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.6.0
trim : float, optional
If nonzero, performs a trimmed (Yuen's) t-test.
Defines the fraction of elements to be trimmed from each end of the
input samples. If 0 (default), no elements will be trimmed from either
side. The number of trimmed elements from each tail is the floor of the
trim times the number of elements. Valid range is [0, .5).
.. versionadded:: 1.7
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The p-value.
Notes
-----
Suppose we observe two independent samples, e.g. flower petal lengths, and
we are considering whether the two samples were drawn from the same
population (e.g. the same species of flower or two species with similar
petal characteristics) or two different populations.
The t-test quantifies the difference between the arithmetic means
of the two samples. The p-value quantifies the probability of observing
as or more extreme values assuming the null hypothesis, that the
samples are drawn from populations with the same population means, is true.
A p-value larger than a chosen threshold (e.g. 5% or 1%) indicates that
our observation is not so unlikely to have occurred by chance. Therefore,
we do not reject the null hypothesis of equal population means.
If the p-value is smaller than our threshold, then we have evidence
against the null hypothesis of equal population means.
By default, the p-value is determined by comparing the t-statistic of the
observed data against a theoretical t-distribution.
When ``1 < permutations < binom(n, k)``, where
* ``k`` is the number of observations in `a`,
* ``n`` is the total number of observations in `a` and `b`, and
* ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``),
the data are pooled (concatenated), randomly assigned to either group `a`
or `b`, and the t-statistic is calculated. This process is performed
repeatedly (`permutations` times), generating a distribution of the
t-statistic under the null hypothesis, and the t-statistic of the observed
data is compared to this distribution to determine the p-value. When
``permutations >= binom(n, k)``, an exact test is performed: the data are
partitioned between the groups in each distinct way exactly once.
The permutation test can be computationally expensive and not necessarily
more accurate than the analytical test, but it does not make strong
assumptions about the shape of the underlying distribution.
Use of trimming is commonly referred to as the trimmed t-test. At times
called Yuen's t-test, this is an extension of Welch's t-test, with the
difference being the use of winsorized means in calculation of the variance
and the trimmed sample size in calculation of the statistic. Trimming is
recommended if the underlying distribution is long-tailed or contaminated
with outliers [4]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
.. [3] http://en.wikipedia.org/wiki/Resampling_%28statistics%29
.. [4] Yuen, Karen K. "The Two-Sample Trimmed t for Unequal Population
Variances." Biometrika, vol. 61, no. 1, 1974, pp. 165-170. JSTOR,
www.jstor.org/stable/2334299. Accessed 30 Mar. 2021.
.. [5] Yuen, Karen K., and W. J. Dixon. "The Approximate Behaviour and
Performance of the Two-Sample Trimmed t." Biometrika, vol. 60,
no. 2, 1973, pp. 369-374. JSTOR, www.jstor.org/stable/2334550.
Accessed 30 Mar. 2021.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs2)
Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952038870015)
>>> stats.ttest_ind(rvs1, rvs2, equal_var=False)
Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952553131064)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs3)
Ttest_indResult(statistic=-1.6370984482905417, pvalue=0.1019251574705033)
>>> stats.ttest_ind(rvs1, rvs3, equal_var=False)
Ttest_indResult(statistic=-1.637098448290542, pvalue=0.10202110497954867)
When ``n1 != n2``, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs4)
Ttest_indResult(statistic=-1.9481646859513422, pvalue=0.05186270935842703)
>>> stats.ttest_ind(rvs1, rvs4, equal_var=False)
Ttest_indResult(statistic=-1.3146566100751664, pvalue=0.1913495266513811)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs5)
Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0046418707568707885)
>>> stats.ttest_ind(rvs1, rvs5, equal_var=False)
Ttest_indResult(statistic=-1.8686598649188084, pvalue=0.06434714193919686)
When performing a permutation test, more permutations typically yields
more accurate results. Use a ``np.random.Generator`` to ensure
reproducibility:
>>> stats.ttest_ind(rvs1, rvs5, permutations=10000,
... random_state=rng)
Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0052)
Take these two samples, one of which has an extreme tail.
>>> a = (56, 128.6, 12, 123.8, 64.34, 78, 763.3)
>>> b = (1.1, 2.9, 4.2)
Use the `trim` keyword to perform a trimmed (Yuen) t-test. For example,
using 20% trimming, ``trim=.2``, the test will reduce the impact of one
(``np.floor(trim*len(a))``) element from each tail of sample `a`. It will
have no effect on sample `b` because ``np.floor(trim*len(b))`` is 0.
>>> stats.ttest_ind(a, b, trim=.2)
Ttest_indResult(statistic=3.4463884028073513,
pvalue=0.01369338726499547)
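A one-sided alternative can be combined with any of the modes above; a
sketch, with output omitted:
>>> stats.ttest_ind(rvs1, rvs5, alternative='less')  # doctest: +SKIP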
"""
if not (0 <= trim < .5):
raise ValueError("Trimming percentage should be 0 <= `trim` < .5.")
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
if permutations or trim != 0:
raise ValueError("nan-containing/masked inputs with "
"nan_policy='omit' are currently not "
"supported by permutation tests or "
"trimmed tests.")
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var, alternative)
if a.size == 0 or b.size == 0:
return _ttest_nans(a, b, axis, Ttest_indResult)
if permutations is not None and permutations != 0:
if trim != 0:
raise ValueError("Permutations are currently not supported "
"with trimming.")
if permutations < 0 or (np.isfinite(permutations) and
int(permutations) != permutations):
raise ValueError("Permutations must be a non-negative integer.")
res = _permutation_ttest(a, b, permutations=permutations,
axis=axis, equal_var=equal_var,
nan_policy=nan_policy,
random_state=random_state,
alternative=alternative)
else:
n1 = a.shape[axis]
n2 = b.shape[axis]
if trim == 0:
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
m1 = np.mean(a, axis)
m2 = np.mean(b, axis)
else:
v1, m1, n1 = _ttest_trim_var_mean_len(a, trim, axis)
v2, m2, n2 = _ttest_trim_var_mean_len(b, trim, axis)
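        # With ``equal_var=True`` the pooled-variance (Student) denominator and
        # df = n1 + n2 - 2 are used; with ``equal_var=False`` Welch's
        # denominator sqrt(v1/n1 + v2/n2) and Welch-Satterthwaite degrees of
        # freedom are used (computed by the ``*_ttest_denom`` helpers called
        # below).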
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(m1, m2, denom, df, alternative)
return Ttest_indResult(*res)
def _ttest_trim_var_mean_len(a, trim, axis):
"""Variance, mean, and length of winsorized input along specified axis"""
# for use with `ttest_ind` when trimming.
# further calculations in this test assume that the inputs are sorted.
# From [4] Section 1 "Let x_1, ..., x_n be n ordered observations..."
a = np.sort(a, axis=axis)
# `g` is the number of elements to be replaced on each tail, converted
# from a percentage amount of trimming
n = a.shape[axis]
g = int(n * trim)
# Calculate the Winsorized variance of the input samples according to
# specified `g`
v = _calculate_winsorized_variance(a, g, axis)
# the total number of elements in the trimmed samples
n -= 2 * g
# calculate the g-times trimmed mean, as defined in [4] (1-1)
m = trim_mean(a, trim, axis=axis)
return v, m, n
def _calculate_winsorized_variance(a, g, axis):
"""Calculates g-times winsorized variance along specified axis"""
# it is expected that the input `a` is sorted along the correct axis
if g == 0:
return np.var(a, ddof=1, axis=axis)
# move the intended axis to the end that way it is easier to manipulate
a_win = np.moveaxis(a, axis, -1)
# save where NaNs are for later use.
nans_indices = np.any(np.isnan(a_win), axis=-1)
# Winsorization and variance calculation are done in one step in [4]
# (1-3), but here winsorization is done first; replace the left and
    # right sides with the repeating value. This can be seen in effect in
    # (1-3) in [4], where the leftmost and rightmost tails are replaced with
# `(g + 1) * x_{g + 1}` on the left and `(g + 1) * x_{n - g}` on the
# right. Zero-indexing turns `g + 1` to `g`, and `n - g` to `- g - 1` in
# array indexing.
a_win[..., :g] = a_win[..., [g]]
a_win[..., -g:] = a_win[..., [-g - 1]]
# Determine the variance. In [4], the degrees of freedom is expressed as
# `h - 1`, where `h = n - 2g` (unnumbered equations in Section 1, end of
# page 369, beginning of page 370). This is converted to NumPy's format,
    # `n - ddof` for use with `np.var`. The result is converted to an
# array to accommodate indexing later.
var_win = np.asarray(np.var(a_win, ddof=(2 * g + 1), axis=-1))
# with `nan_policy='propagate'`, NaNs may be completely trimmed out
# because they were sorted into the tail of the array. In these cases,
# replace computed variances with `np.nan`.
var_win[nans_indices] = np.nan
return var_win
def _data_partitions(data, permutations, size_a, axis=-1, random_state=None):
"""All partitions of data into sets of given lengths, ignoring order"""
random_state = check_random_state(random_state)
if axis < 0: # we'll be adding a new dimension at the end
axis = data.ndim + axis
# prepare permutation indices
size = data.shape[axis]
# number of distinct combinations
n_max = special.comb(size, size_a)
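    # If fewer permutations are requested than there are distinct partitions,
    # draw random permutations; otherwise enumerate every partition exactly,
    # so the resampling test becomes an exact permutation test.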
if permutations < n_max:
indices = np.array([random_state.permutation(size)
for i in range(permutations)]).T
else:
permutations = n_max
indices = np.array([np.concatenate(z)
for z in _all_partitions(size_a, size-size_a)]).T
data = data.swapaxes(axis, -1) # so we can index along a new dimension
data = data[..., indices] # generate permutations
data = data.swapaxes(-2, axis) # restore original axis order
data = np.moveaxis(data, -1, 0) # permutations indexed along axis 0
return data, permutations
def _calc_t_stat(a, b, equal_var, axis=-1):
"""Calculate the t statistic along the given dimension."""
na = a.shape[axis]
nb = b.shape[axis]
avg_a = np.mean(a, axis=axis)
avg_b = np.mean(b, axis=axis)
var_a = np.var(a, axis=axis, ddof=1)
var_b = np.var(b, axis=axis, ddof=1)
if not equal_var:
denom = _unequal_var_ttest_denom(var_a, na, var_b, nb)[1]
else:
denom = _equal_var_ttest_denom(var_a, na, var_b, nb)[1]
return (avg_a-avg_b)/denom
def _permutation_ttest(a, b, permutations, axis=0, equal_var=True,
nan_policy='propagate', random_state=None,
alternative="two-sided"):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores
using permutation methods.
This test is similar to `stats.ttest_ind`, except it doesn't rely on an
approximate normality assumption since it uses a permutation test.
This function is only called from ttest_ind when permutations is not None.
Parameters
----------
a, b : array_like
The arrays must be broadcastable, except along the dimension
corresponding to `axis` (the zeroth, by default).
axis : int, optional
The axis over which to operate on a and b.
permutations : int, optional
Number of permutations used to calculate p-value. If greater than or
equal to the number of distinct permutations, perform an exact test.
equal_var : bool, optional
        If False, Welch's t-test, which does not assume equal population
        variances, is conducted. Otherwise, an ordinary t-test assuming
        equal population variances is conducted.
random_state : {None, int, `numpy.random.Generator`}, optional
        If `random_state` is None the `numpy.random.Generator` singleton is
        used.
        If `random_state` is an int, a new ``Generator`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` instance then that
        instance is used.
Pseudorandom number generator state used for generating random
permutations.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The p-value.
"""
random_state = check_random_state(random_state)
t_stat_observed = _calc_t_stat(a, b, equal_var, axis=axis)
na = a.shape[axis]
mat = _broadcast_concatenate((a, b), axis=axis)
mat = np.moveaxis(mat, axis, -1)
mat_perm, permutations = _data_partitions(mat, permutations, size_a=na,
random_state=random_state)
a = mat_perm[..., :na]
b = mat_perm[..., na:]
t_stat = _calc_t_stat(a, b, equal_var)
compare = {"less": np.less_equal,
"greater": np.greater_equal,
"two-sided": lambda x, y: (x <= -np.abs(y)) | (x >= np.abs(y))}
# Calculate the p-values
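    # The p-value is the fraction of permuted statistics that are at least as
    # extreme as the observed statistic; the non-strict comparisons (<=, >=)
    # mean that permutations reproducing the observed value count as extreme.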
cmps = compare[alternative](t_stat, t_stat_observed)
pvalues = cmps.sum(axis=0) / permutations
# nans propagate naturally in statistic calculation, but need to be
# propagated manually into pvalues
if nan_policy == 'propagate' and np.isnan(t_stat_observed).any():
if np.ndim(pvalues) == 0:
pvalues = np.float64(np.nan)
else:
pvalues[np.isnan(t_stat_observed)] = np.nan
return (t_stat_observed, pvalues)
def _get_len(a, axis, msg):
try:
n = a.shape[axis]
except IndexError:
raise np.AxisError(axis, a.ndim, msg) from None
return n
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate', alternative="two-sided"):
"""Calculate the t-test on TWO RELATED samples of scores, a and b.
This is a test for the null hypothesis that two related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
The p-value.
Notes
-----
    Examples for use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
    example greater than 0.05 or 0.1, then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
... + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
>>> stats.ttest_rel(rvs1, rvs2)
Ttest_relResult(statistic=-0.4549717054410304, pvalue=0.6493274702088672)
>>> rvs3 = (stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng)
... + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
>>> stats.ttest_rel(rvs1, rvs3)
Ttest_relResult(statistic=-5.879467544540889, pvalue=7.540777129099917e-09)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis, alternative)
na = _get_len(a, axis, "first argument")
nb = _get_len(b, axis, "second argument")
if na != nb:
raise ValueError('unequal length arrays')
if na == 0:
return _ttest_nans(a, b, axis, Ttest_relResult)
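    # The paired test is a one-sample t-test on the differences d = a - b:
    # t = mean(d) / sqrt(var(d) / n) with df = n - 1.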
n = a.shape[axis]
df = n - 1
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t, alternative)
return Ttest_relResult(t, prob)
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""Count the number of non-masked elements of an array.
This function behaves like `np.ma.count`, but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def _m_broadcast_to(a, shape):
if np.ma.isMaskedArray(a):
return np.ma.masked_array(np.broadcast_to(a, shape),
mask=np.broadcast_to(a.mask, shape))
return np.broadcast_to(a, shape, subok=True)
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
The power in the Cressie-Read power divergence statistic. The default
is 1. For convenience, `lambda_` may be assigned one of the following
strings, in which case the corresponding numerical value is used:
* ``"pearson"`` (value 1)
Pearson's chi-squared statistic. In this case, the function is
equivalent to `chisquare`.
* ``"log-likelihood"`` (value 0)
Log-likelihood ratio. Also known as the G-test [3]_.
* ``"freeman-tukey"`` (value -1/2)
Freeman-Tukey statistic.
* ``"mod-log-likelihood"`` (value -1)
Modified log-likelihood ratio.
* ``"neyman"`` (value -2)
Neyman's statistic.
* ``"cressie-read"`` (value 2/3)
The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `power_divergence` raises an error if the sums
do not agree within a relative tolerance of ``1e-8``.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", https://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, str):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. "
"Valid strings are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
f_obs_float = f_obs.astype(np.float64)
if f_exp is not None:
f_exp = np.asanyarray(f_exp)
bshape = _broadcast_shapes(f_obs_float.shape, f_exp.shape)
f_obs_float = _m_broadcast_to(f_obs_float, bshape)
f_exp = _m_broadcast_to(f_exp, bshape)
rtol = 1e-8 # to pass existing tests
with np.errstate(invalid='ignore'):
f_obs_sum = f_obs_float.sum(axis=axis)
f_exp_sum = f_exp.sum(axis=axis)
relative_diff = (np.abs(f_obs_sum - f_exp_sum) /
np.minimum(f_obs_sum, f_exp_sum))
diff_gt_tol = (relative_diff > rtol).any()
if diff_gt_tol:
msg = (f"For each axis slice, the sum of the observed "
f"frequencies must agree with the sum of the "
f"expected frequencies to a relative tolerance "
f"of {rtol}, but the percent differences are:\n"
f"{relative_diff}")
raise ValueError(msg)
else:
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = f_obs.mean(axis=axis, keepdims=True)
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs_float - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
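        # statistic = 2 / (lambda_ * (lambda_ + 1))
        #             * sum(f_obs * ((f_obs / f_exp)**lambda_ - 1))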
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""Calculate a one-way chi-square test.
The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
scipy.stats.power_divergence
scipy.stats.fisher_exact : Fisher exact test on a 2x2 contingency table.
scipy.stats.barnard_exact : An unconditional exact test. An alternative
to chi-squared test for small sample sizes.
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5. According to [3]_, the
total number of samples is recommended to be greater than 13,
otherwise exact tests (such as Barnard's Exact test) should be used
because they do not overreject.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `chisquare` raises an error if the sums do not
agree within a relative tolerance of ``1e-8``.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not chi-square, in which case this test
is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] Pearson, Karl. "On the criterion that a given system of deviations from the probable
in the case of a correlated system of variables is such that it can be reasonably
supposed to have arisen from random sampling", Philosophical Magazine. Series 5. 50
(1900), pp. 157-175.
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def _compute_dplus(cdfvals):
"""Computes D+ as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals : array_like
Sorted array of CDF values between 0 and 1
Returns
-------
Maximum distance of the CDF values below Uniform(0, 1)
"""
n = len(cdfvals)
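    # D+ = max over i of (i/n - CDF(x_(i))), i = 1..n, for the sorted sample.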
return (np.arange(1.0, n + 1) / n - cdfvals).max()
def _compute_dminus(cdfvals):
"""Computes D- as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals : array_like
Sorted array of CDF values between 0 and 1
Returns
-------
Maximum distance of the CDF values above Uniform(0, 1)
"""
n = len(cdfvals)
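    # D- = max over i of (CDF(x_(i)) - (i-1)/n), i = 1..n, for the sorted sample.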
return (cdfvals - np.arange(0.0, n)/n).max()
def ks_1samp(x, cdf, args=(), alternative='two-sided', mode='auto'):
"""
Performs the one-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying distribution F(x) of a sample
against a given continuous distribution G(x). See Notes for a description
of the available null and alternative hypotheses.
Parameters
----------
x : array_like
a 1-D array of observations of iid random variables.
cdf : callable
callable used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used with `cdf`.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice
the one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D- (depending on the value
of 'alternative')
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp, kstest
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = np.linspace(-15, 15, 9)
>>> stats.ks_1samp(x, stats.norm.cdf)
(0.44435602715924361, 0.038850142705171065)
>>> stats.ks_1samp(stats.norm.rvs(size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
*Test against one-sided alternative hypothesis*
    Shift distribution to larger values, so that ``CDF(x) < norm.cdf(x)``:
>>> x = stats.norm.rvs(loc=0.2, size=100, random_state=rng)
>>> stats.ks_1samp(x, stats.norm.cdf, alternative='less')
KstestResult(statistic=0.100203351482..., pvalue=0.125544644447...)
Reject null hypothesis in favor of alternative hypothesis: less
>>> stats.ks_1samp(x, stats.norm.cdf, alternative='greater')
KstestResult(statistic=0.018749806388..., pvalue=0.920581859791...)
    Don't reject null hypothesis in favor of alternative hypothesis: greater
>>> stats.ks_1samp(x, stats.norm.cdf)
KstestResult(statistic=0.100203351482..., pvalue=0.250616879765...)
Don't reject null hypothesis in favor of alternative hypothesis: two-sided
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> stats.ks_1samp(stats.t.rvs(100,size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.064273776544..., pvalue=0.778737758305...)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> stats.ks_1samp(stats.t.rvs(3,size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.128678487493..., pvalue=0.066569081515...)
"""
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
if np.ma.is_masked(x):
x = x.compressed()
N = len(x)
x = np.sort(x)
cdfvals = cdf(x, *args)
if alternative == 'greater':
Dplus = _compute_dplus(cdfvals)
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative == 'less':
Dminus = _compute_dminus(cdfvals)
return KstestResult(Dminus, distributions.ksone.sf(Dminus, N))
# alternative == 'two-sided':
Dplus = _compute_dplus(cdfvals)
Dminus = _compute_dminus(cdfvals)
D = np.max([Dplus, Dminus])
if mode == 'auto': # Always select exact
mode = 'exact'
if mode == 'exact':
prob = distributions.kstwo.sf(D, N)
elif mode == 'asymp':
prob = distributions.kstwobign.sf(D * np.sqrt(N))
else:
# mode == 'approx'
prob = 2 * distributions.ksone.sf(D, N)
prob = np.clip(prob, 0, 1)
return KstestResult(D, prob)
Ks_2sampResult = KstestResult
def _compute_prob_outside_square(n, h):
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
    # Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2h) + binom(2n, n-3h) - ... )
# / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with
# h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
def _count_paths_outside_method(m, n, g, h):
"""Count the number of paths that pass outside the specified diagonal.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The number of paths that go low.
The calculation may overflow - check for a finite answer.
Raises
------
FloatingPointError: Raised if the intermediate computation goes outside
the range of a float.
Notes
-----
Count the integer lattice paths from (0, 0) to (m, n), which at some
point (x, y) along the path, satisfy:
m*y <= n*x - h*g
The paths make steps of size +1 in either positive x or positive y
directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
    Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
# B(x, y) = #{paths from (0,0) to (x,y) without
# previously crossing the boundary}
# = binom(x, y) - #{paths which already reached the boundary}
# Multiply by the number of path extensions going from (x, y) to (m, n)
# Sum.
# Probability is symmetrical in m, n. Computation below assumes m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Not every x needs to be considered.
# xj holds the list of x values to be checked.
# Wherever n*x/m + ng*h crosses an integer
lxj = n + (mg-h)//mg
xj = [(h + mg * j + ng-1)//ng for j in range(lxj)]
# B is an array just holding a few values of B(x,y), the ones needed.
# B[j] == B(x_j, j)
if lxj == 0:
return np.round(special.binom(m + n, n))
B = np.zeros(lxj)
B[0] = 1
# Compute the B(x, y) terms
# The binomial coefficient is an integer, but special.binom()
# may return a float. Round it to the nearest integer.
for j in range(1, lxj):
Bj = np.round(special.binom(xj[j] + j, j))
if not np.isfinite(Bj):
raise FloatingPointError()
for i in range(j):
bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i))
Bj -= bin * B[i]
B[j] = Bj
if not np.isfinite(Bj):
raise FloatingPointError()
# Compute the number of path extensions...
num_paths = 0
for j in range(lxj):
bin = np.round(special.binom((m-xj[j]) + (n - j), n-j))
term = B[j] * bin
if not np.isfinite(term):
raise FloatingPointError()
num_paths += term
return np.round(num_paths)
def _attempt_exact_2kssamp(n1, n2, g, d, alternative):
"""Attempts to compute the exact 2sample probability.
n1, n2 are the sample sizes
g is the gcd(n1, n2)
d is the computed max difference in ECDFs
Returns (success, d, probability)
"""
lcm = (n1 // g) * n2
h = int(np.round(d * lcm))
d = h * 1.0 / lcm
if h == 0:
return True, d, 1.0
saw_fp_error, prob = False, np.nan
try:
if alternative == 'two-sided':
if n1 == n2:
prob = _compute_prob_outside_square(n1, h)
else:
prob = _compute_outer_prob_inside_method(n1, n2, g, h)
else:
if n1 == n2:
# prob = binom(2n, n-h) / binom(2n, n)
# Evaluating in that form incurs roundoff errors
# from special.binom. Instead calculate directly
jrange = np.arange(h)
prob = np.prod((n1 - jrange) / (n1 + jrange + 1.0))
else:
num_paths = _count_paths_outside_method(n1, n2, g, h)
bin = special.binom(n1 + n2, n1)
if not np.isfinite(bin) or not np.isfinite(num_paths)\
or num_paths > bin:
saw_fp_error = True
else:
prob = num_paths / bin
except FloatingPointError:
saw_fp_error = True
if saw_fp_error:
return False, d, np.nan
if not (0 <= prob <= 1):
return False, d, prob
return True, d, prob
def ks_2samp(data1, data2, alternative='two-sided', mode='auto'):
"""
Performs the two-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying continuous distributions F(x) and G(x)
of two independent samples. See Notes for a description
of the available null and alternative hypotheses.
Parameters
----------
data1, data2 : array_like, 1-Dimensional
Two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS statistic.
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
If the KS statistic is small or the p-value is high, then we cannot
reject the null hypothesis in favor of the alternative.
If the mode is 'auto', the computation is exact if the sample sizes are
less than 10000. For larger sizes, the computation uses the
Kolmogorov-Smirnov distributions to compute an approximate value.
The 'two-sided' 'exact' computation computes the complementary probability
and then subtracts from 1. As such, the minimum probability it can return
is about 1e-16. While the algorithm itself is exact, numerical
errors may accumulate for large sample sizes. It is most suited to
situations in which one of the sample sizes is only a few thousand.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
References
----------
.. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov
           Two-Sample Test," Arkiv för Matematik, 3, No. 43 (1958), 469-86.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1, random_state=rng)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs2)
KstestResult(statistic=0.24833333333333332, pvalue=5.846586728086578e-07)
    For a slightly different distribution, we cannot reject the null hypothesis
    at a 10% or lower alpha since the p-value, about 0.44, is higher than 10%:
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs3)
KstestResult(statistic=0.07833333333333334, pvalue=0.4379658456442945)
    For an identical distribution, we cannot reject the null hypothesis at the
    5% level since the p-value, about 0.054, is above 5%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs4)
KstestResult(statistic=0.12166666666666667, pvalue=0.05401863039081145)
"""
if mode not in ['auto', 'exact', 'asymp']:
raise ValueError(f'Invalid value for mode: {mode}')
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'less', 'greater']:
raise ValueError(f'Invalid value for alternative: {alternative}')
MAX_AUTO_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
if np.ma.is_masked(data1):
data1 = data1.compressed()
if np.ma.is_masked(data2):
data2 = data2.compressed()
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if min(n1, n2) == 0:
raise ValueError('Data passed to ks_2samp must not be empty')
data_all = np.concatenate([data1, data2])
# using searchsorted solves equal data problem
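    # searchsorted(..., side='right') counts how many points of each sample are
    # <= every value in the pooled data, so cdf1 and cdf2 are the two empirical
    # CDFs evaluated at all pooled observations; side='right' makes tied values
    # contribute consistently to both ECDFs.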
cdf1 = np.searchsorted(data1, data_all, side='right') / n1
cdf2 = np.searchsorted(data2, data_all, side='right') / n2
cddiffs = cdf1 - cdf2
# Ensure sign of minS is not negative.
minS = np.clip(-np.min(cddiffs), 0, 1)
maxS = np.max(cddiffs)
alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)}
d = alt2Dvalue[alternative]
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -np.inf
original_mode = mode
if mode == 'auto':
mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int32).max / n2g:
mode = 'asymp'
warnings.warn(
f"Exact ks_2samp calculation not possible with samples sizes "
f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning)
if mode == 'exact':
success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
if not success:
mode = 'asymp'
if original_mode == 'exact':
warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
f"Switching to mode={mode}.", RuntimeWarning)
if mode == 'asymp':
        # The product n1*n2 is large. Use Smirnov's asymptotic formula.
# Ensure float to avoid overflow in multiplication
# sorted because the one-sided formula is not symmetric in n1, n2
m, n = sorted([float(n1), float(n2)], reverse=True)
en = m * n / (m + n)
if alternative == 'two-sided':
prob = distributions.kstwo.sf(d, np.round(en))
else:
z = np.sqrt(en) * d
# Use Hodges' suggested approximation Eqn 5.3
# Requires m to be the larger of (n1, n2)
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
prob = np.clip(prob, 0, 1)
return KstestResult(d, prob)
def _parse_kstest_args(data1, data2, args, N):
# kstest allows many different variations of arguments.
# Pull out the parsing into a separate function
# (xvals, yvals, ) # 2sample
# (xvals, cdf function,..)
# (xvals, name of distribution, ...)
# (name of distribution, name of distribution, ...)
# Returns xvals, yvals, cdf
# where cdf is a cdf function, or None
# and yvals is either an array_like of values, or None
# and xvals is array_like.
rvsfunc, cdf = None, None
if isinstance(data1, str):
rvsfunc = getattr(distributions, data1).rvs
elif callable(data1):
rvsfunc = data1
if isinstance(data2, str):
cdf = getattr(distributions, data2).cdf
data2 = None
elif callable(data2):
cdf = data2
data2 = None
data1 = np.sort(rvsfunc(*args, size=N) if rvsfunc else data1)
return data1, data2, cdf
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='auto'):
"""
Performs the (one-sample or two-sample) Kolmogorov-Smirnov test for
goodness of fit.
The one-sample test compares the underlying distribution F(x) of a sample
against a given distribution G(x). The two-sample test compares the
underlying distributions of two independent samples. Both tests are valid
only for continuous distributions.
Parameters
----------
rvs : str, array_like, or callable
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
If a string, it should be the name of a distribution in `scipy.stats`,
which will be used to generate random variables.
cdf : str, array_like or callable
If array_like, it should be a 1-D array of observations of random
variables, and the two-sample test is performed
(and rvs must be array_like).
If a callable, that callable is used to calculate the cdf.
If a string, it should be the name of a distribution in `scipy.stats`,
which will be used as the cdf function.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings or
callables.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice the
one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
KstestResult(statistic=0.444356027159..., pvalue=0.038850140086...)
>>> stats.kstest(stats.norm.rvs(size=100, random_state=rng), stats.norm.cdf)
KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
The above lines are equivalent to:
>>> stats.kstest(stats.norm.rvs, 'norm', N=100)
KstestResult(statistic=0.113810164200..., pvalue=0.138690052319...) # may vary
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``CDF(x) < norm.cdf(x)``:
>>> x = stats.norm.rvs(loc=0.2, size=100, random_state=rng)
>>> stats.kstest(x, 'norm', alternative='less')
KstestResult(statistic=0.1002033514..., pvalue=0.1255446444...)
Reject null hypothesis in favor of alternative hypothesis: less
>>> stats.kstest(x, 'norm', alternative='greater')
KstestResult(statistic=0.018749806388..., pvalue=0.920581859791...)
Don't reject null hypothesis in favor of alternative hypothesis: greater
>>> stats.kstest(x, 'norm')
KstestResult(statistic=0.100203351482..., pvalue=0.250616879765...)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> stats.kstest(stats.t.rvs(100, size=100, random_state=rng), 'norm')
KstestResult(statistic=0.064273776544..., pvalue=0.778737758305...)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> stats.kstest(stats.t.rvs(3, size=100, random_state=rng), 'norm')
KstestResult(statistic=0.128678487493..., pvalue=0.066569081515...)
"""
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
xvals, yvals, cdf = _parse_kstest_args(rvs, cdf, args, N)
if cdf:
return ks_1samp(xvals, cdf, args=args, alternative=alternative,
mode=mode)
return ks_2samp(xvals, yvals, alternative=alternative, mode=mode)
def tiecorrect(rankvals):
"""Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `~scipy.stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
@_axis_nan_policy_factory(RanksumsResult, n_samples=2)
def ranksums(x, y, alternative='two-sided'):
"""Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': one of the distributions (underlying `x` or `y`) is
stochastically greater than the other.
* 'less': the distribution underlying `x` is stochastically less
than the distribution underlying `y`.
* 'greater': the distribution underlying `x` is stochastically greater
than the distribution underlying `y`.
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed.
pvalue : float
The p-value of the test.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
Examples
--------
We can test the hypothesis that two independent unequal-sized samples are
    drawn from the same distribution by computing the Wilcoxon rank-sum
statistic.
>>> from scipy.stats import ranksums
>>> rng = np.random.default_rng()
>>> sample1 = rng.uniform(-1, 1, 200)
>>> sample2 = rng.uniform(-0.5, 1.5, 300) # a shifted distribution
>>> ranksums(sample1, sample2)
RanksumsResult(statistic=-7.887059, pvalue=3.09390448e-15) # may vary
>>> ranksums(sample1, sample2, alternative='less')
RanksumsResult(statistic=-7.750585297581713, pvalue=4.573497606342543e-15) # may vary
>>> ranksums(sample1, sample2, alternative='greater')
RanksumsResult(statistic=-7.750585297581713, pvalue=0.9999999999999954) # may vary
The p-value of less than ``0.05`` indicates that this test rejects the
hypothesis at the 5% significance level.
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
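    # Under H0 the rank sum S of the first sample has mean n1*(n1+n2+1)/2 and
    # variance n1*n2*(n1+n2+1)/12; z is the standardized rank sum.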
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
z, prob = _normtest_finish(z, alternative)
return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
@_axis_nan_policy_factory(KruskalResult, n_samples=None)
def kruskal(*args, nan_policy='propagate'):
"""Compute the Kruskal-Wallis H-test for independent samples.
The Kruskal-Wallis H-test tests the null hypothesis that the population
    medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments. Samples must be one-dimensional.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties.
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution. The p-value returned is the survival function of
the chi square distribution evaluated at H.
See Also
--------
f_oneway : 1-way ANOVA.
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements.
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
elif arg.ndim != 1:
raise ValueError("Samples must be one-dimensional.")
n = np.asarray(list(map(len, args)))
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', 'raise' or 'omit'")
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
        args = [ma.masked_invalid(a) for a in args]
return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
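    # H = 12 / (N*(N+1)) * sum(R_i**2 / n_i) - 3*(N+1), where R_i is the rank
    # sum of group i and N is the total number of observations; H is then
    # divided by the tie-correction factor computed above.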
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]
totaln = np.sum(n, dtype=float)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""Compute the Friedman test for repeated measurements.
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
The test statistic, correcting for ties.
pvalue : float
The associated p-value assuming that the test statistic has a chi
squared distribution.
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] https://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('At least 3 sets of measurements must be given '
'for Friedman test, got {}.'.format(k))
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for d in data:
replist, repnum = find_repeats(array(d))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / (k*(k*k - 1)*n)
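    # chisq = (12 / (n*k*(k+1)) * sum_j(R_j**2) - 3*n*(k+1)) / c, where R_j is
    # the rank sum of treatment j across the n blocks and c is the tie
    # correction computed above.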
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
('statistic', 'pvalue'))
def brunnermunzel(x, y, alternative="two-sided", distribution="t",
nan_policy='propagate'):
"""Compute the Brunner-Munzel test on samples x and y.
The Brunner-Munzel test is a nonparametric test of the null hypothesis that
when values are taken one by one from each group, the probabilities of
getting large values in both groups are equal.
Unlike the Wilcoxon-Mann-Whitney's U test, this does not require the
    assumption of equal variances between the two groups. Note that this does
    not assume the distributions are the same. This test works on two
    independent samples, which may have different sizes.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
distribution : {'t', 'normal'}, optional
Defines how to get the p-value.
The following options are available (default is 't'):
* 't': get the p-value by t-distribution
* 'normal': get the p-value by standard normal distribution.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
        The Brunner-Munzel W statistic.
pvalue : float
        p-value assuming a t distribution. One-sided or
two-sided, depending on the choice of `alternative` and `distribution`.
See Also
--------
mannwhitneyu : Mann-Whitney rank test on two samples.
Notes
-----
    Brunner and Munzel recommended estimating the p-value with the
    t-distribution when the sample size is 50 or less. If the sample size is
    smaller than 10, it would be better to use the permuted Brunner-Munzel
    test (see [2]_).
References
----------
    .. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
problem: Asymptotic theory and a small-sample approximation".
Biometrical Journal. Vol. 42(2000): 17-25.
.. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
non-parametric Behrens-Fisher problem". Computational Statistics and
Data Analysis. Vol. 51(2007): 5192-5204.
Examples
--------
>>> from scipy import stats
>>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
>>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
>>> w, p_value = stats.brunnermunzel(x1, x2)
>>> w
3.1374674823029505
>>> p_value
0.0057862086661515377
"""
x = np.asarray(x)
y = np.asarray(y)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == "omit" or npy == "omit":
nan_policy = "omit"
if contains_nan and nan_policy == "propagate":
return BrunnerMunzelResult(np.nan, np.nan)
elif contains_nan and nan_policy == "omit":
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.brunnermunzel(x, y, alternative, distribution)
nx = len(x)
ny = len(y)
if nx == 0 or ny == 0:
return BrunnerMunzelResult(np.nan, np.nan)
rankc = rankdata(np.concatenate((x, y)))
rankcx = rankc[0:nx]
rankcy = rankc[nx:nx+ny]
rankcx_mean = np.mean(rankcx)
rankcy_mean = np.mean(rankcy)
rankx = rankdata(x)
ranky = rankdata(y)
rankx_mean = np.mean(rankx)
ranky_mean = np.mean(ranky)
Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
Sx /= nx - 1
Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
Sy /= ny - 1
wbfn = nx * ny * (rankcy_mean - rankcx_mean)
wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
if distribution == "t":
df_numer = np.power(nx * Sx + ny * Sy, 2.0)
df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
df = df_numer / df_denom
p = distributions.t.cdf(wbfn, df)
elif distribution == "normal":
p = distributions.norm.cdf(wbfn)
else:
raise ValueError(
"distribution should be 't' or 'normal'")
if alternative == "greater":
pass
elif alternative == "less":
p = 1 - p
elif alternative == "two-sided":
p = 2 * np.min([p, 1-p])
else:
raise ValueError(
"alternative should be 'less', 'greater' or 'two-sided'")
return BrunnerMunzelResult(wbfn, p)
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests bearing upon the same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'pearson', 'tippett', 'stouffer',
'mudholkar_george'}, optional
Name of method to use to combine p-values.
The following methods are available (default is 'fisher'):
* 'fisher': Fisher's method (Fisher's combined probability test), the
sum of the logarithm of the p-values
* 'pearson': Pearson's method (similar to Fisher's but uses sum of the
complement of the p-values inside the logarithms)
* 'tippett': Tippett's method (minimum of p-values)
* 'stouffer': Stouffer's Z-score method
* 'mudholkar_george': the difference of Fisher's and Pearson's methods
divided by 2
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method.
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [6]_ [7]_.
The Pearson's method uses :math:`log(1-p_i)` inside the sum whereas Fisher's
method uses :math:`log(p_i)` [4]_. For Fisher's and Pearson's method, the
sum of the logarithms is multiplied by -2 in the implementation. This
quantity has a chi-square distribution that determines the p-value. The
`mudholkar_george` method is the difference of the Fisher's and Pearson's
    test statistics, each of which includes the -2 factor [4]_. However, the
    `mudholkar_george` method does not include these -2 factors. The test
    statistic of `mudholkar_george` is the sum of logistic random variables and
equation 3.6 in [3]_ is used to approximate the p-value based on Student's
t-distribution.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [4] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [5] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [6] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [7] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'pearson':
statistic = -2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * normalizing_factor
* approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.sf(statistic, 1, len(pvalues))
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
        raise ValueError(
            "Invalid method %r. Options are 'fisher', 'pearson', "
            "'mudholkar_george', 'tippett', or 'stouffer'." % method)
return (statistic, pval)
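# Usage sketch (illustrative p-values only; outputs not asserted here):
# combine three independent p-values with Fisher's method, then with a
# weighted Stouffer's method.
#
#   >>> stat, p = combine_pvalues([0.05, 0.20, 0.50], method='fisher')
#   >>> stat, p = combine_pvalues([0.05, 0.20, 0.50], method='stouffer',
#   ...                           weights=[10, 20, 40])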
#####################################
# STATISTICAL DISTANCES #
#####################################
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute the first Wasserstein distance between two 1D distributions.
This distance is also known as the earth mover's distance, since it can be
seen as the minimum amount of "work" required to transform :math:`u` into
:math:`v`, where "work" is measured as the amount of distribution weight
that must be moved, multiplied by the distance it has to be moved.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The first Wasserstein distance between the distributions :math:`u` and
:math:`v` is:
.. math::
l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
\mathbb{R}} |x-y| \mathrm{d} \pi (x, y)
where :math:`\Gamma (u, v)` is the set of (probability) distributions on
:math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
:math:`v` on the first and second factors respectively.
If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
:math:`v`, this distance also equals to:
.. math::
l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|
See [2]_ for a proof of the equivalence of both definitions.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
.. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.
Examples
--------
>>> from scipy.stats import wasserstein_distance
>>> wasserstein_distance([0, 1, 3], [5, 6, 8])
5.0
>>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
0.25
>>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
4.0781331438047861
"""
return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""Compute the energy distance between two 1D distributions.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The energy distance between two distributions :math:`u` and :math:`v`, whose
respective CDFs are :math:`U` and :math:`V`, equals to:
.. math::
D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
\mathbb E|Y - Y'| \right)^{1/2}
where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
independent random variables whose probability distribution is :math:`u`
(resp. :math:`v`).
As shown in [2]_, for one-dimensional real-valued variables, the energy
distance is linked to the non-distribution-free version of the Cramér-von
Mises distance:
.. math::
D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
\right)^{1/2}
Note that the common Cramér-von Mises criterion uses the distribution-free
version of the distance. See [2]_ (section 2), for more details about both
versions of the distance.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
.. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
Green State University, Department of Mathematics and Statistics,
Technical Report 02-16 (2002).
.. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
Computational Statistics, 8(1):27-38 (2015).
.. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
Examples
--------
>>> from scipy.stats import energy_distance
>>> energy_distance([0], [2])
2.0000000000000004
>>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
1.0000000000000002
>>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
0.88003340976158217
"""
return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
u_weights, v_weights)
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute, between two one-dimensional distributions :math:`u` and
:math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
statistical distance that is defined as:
.. math::
l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}
p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2
gives the energy distance.
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
"""
u_values, u_weights = _validate_distribution(u_values, u_weights)
v_values, v_weights = _validate_distribution(v_values, v_weights)
u_sorter = np.argsort(u_values)
v_sorter = np.argsort(v_values)
all_values = np.concatenate((u_values, v_values))
all_values.sort(kind='mergesort')
# Compute the differences between pairs of successive values of u and v.
deltas = np.diff(all_values)
# Get the respective positions of the values of u and v among the values of
# both distributions.
u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right')
v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right')
# Calculate the CDFs of u and v using their weights, if specified.
if u_weights is None:
u_cdf = u_cdf_indices / u_values.size
else:
u_sorted_cumweights = np.concatenate(([0],
np.cumsum(u_weights[u_sorter])))
u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1]
if v_weights is None:
v_cdf = v_cdf_indices / v_values.size
else:
v_sorted_cumweights = np.concatenate(([0],
np.cumsum(v_weights[v_sorter])))
v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1]
# Compute the value of the integral based on the CDFs.
# If p = 1 or p = 2, we avoid using np.power, which introduces an overhead
# of about 15%.
if p == 1:
return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas))
if p == 2:
return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas)))
return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p),
deltas)), 1/p)
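# Illustrative check: for two unweighted samples of equal size, the p=1 CDF
# distance reduces to the mean absolute difference of the sorted samples,
# matching the `wasserstein_distance` docstring example above.
#
#   >>> _cdf_distance(1, [0, 1, 3], [5, 6, 8])   # == np.mean([5, 5, 5])
#   5.0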
def _validate_distribution(values, weights):
"""
Validate the values and weights from a distribution input of `cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
def _sum_of_squares(a, axis=0):
"""Square each element of the input array, and return the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See Also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
def _square_of_sums(a, axis=0):
"""Sum elements of the input array, and return the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See Also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
def rankdata(a, method='average', *, axis=None):
"""Assign ranks to data, dealing with ties appropriately.
By default (``axis=None``), the data array is first flattened, and a flat
array of ranks is returned. Separately reshape the rank array to the
shape of the data array if desired (see Examples).
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked.
method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
The method used to assign ranks to tied elements.
The following methods are available (default is 'average'):
* 'average': The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
* 'min': The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
* 'max': The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
* 'dense': Like 'min', but the rank of the next highest element is
assigned the rank immediately after those assigned to the tied
elements.
* 'ordinal': All values are given a distinct rank, corresponding to
the order that the values occur in `a`.
axis : {None, int}, optional
Axis along which to perform the ranking. If ``None``, the data array
is first flattened.
Returns
-------
ranks : ndarray
An array of size equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
>>> rankdata([[0, 2], [3, 2]]).reshape(2,2)
array([[1. , 2.5],
[4. , 2.5]])
>>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1)
array([[1. , 2.5, 2.5],
[2. , 1. , 3. ]])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
if axis is not None:
a = np.asarray(a)
if a.size == 0:
# The return values of `normalize_axis_index` are ignored. The
# call validates `axis`, even though we won't use it.
# use scipy._lib._util._normalize_axis_index when available
np.core.multiarray.normalize_axis_index(axis, a.ndim)
dt = np.float64 if method == 'average' else np.int_
return np.empty(a.shape, dtype=dt)
return np.apply_along_axis(rankdata, axis, a, method)
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
|
py | b409e1da2814f0f6cbaefa90eddae976244ac746 | # coding: utf-8
"""
Kubernetes workflow API.
"""
from pprint import pformat
from six import iteritems
class DagV1WorkflowStep(object):
"""
Attributes:
swagger_types (dict): The key is the attribute name
and the value is the attribute type.
attribute_map (dict): The key is the attribute name
and the value is the json key in the definition.
"""
swagger_types = {
'name': 'string',
'job_template': 'V1beta1JobTemplateSpec',
'external_ref': 'V1ObjectReference',
'dependencies': 'list[string]'
}
attribute_map = {
'name': 'name',
'job_template': 'jobTemplate',
'external_ref': 'externalRef',
'dependencies': 'dependencies'
}
def __init__(self, name=None, job_template=None, external_ref=None,
dependencies=None):
"""
DagV1WorkflowStep
"""
self._name = None
self._job_template = None
self._external_ref = None
self._dependencies = None
if name is not None:
self.name = name
if job_template is not None:
self.job_template = job_template
if external_ref is not None:
self.external_ref = external_ref
if dependencies is not None:
self.dependencies = dependencies
@property
def name(self):
"""
Gets the name
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name
"""
self._name = name
@property
def job_template(self):
"""
Gets the job_template
"""
return self._job_template
@job_template.setter
def job_template(self, job_template):
"""
Sets the job_template
"""
self._job_template = job_template
@property
def external_ref(self):
"""
Gets the external_ref
"""
return self._external_ref
@external_ref.setter
def external_ref(self, external_ref):
"""
Sets the external_ref
"""
self._external_ref = external_ref
@property
def dependencies(self):
"""
Gets the dependencies
"""
return self._dependencies
@dependencies.setter
def dependencies(self, dependencies):
"""
Sets the dependencies
"""
self._dependencies = dependencies
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, DagV1WorkflowStep):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
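# Usage sketch (hypothetical step names; `job_template`/`external_ref` are left
# unset because their types come from the generated Kubernetes client models):
#
#   >>> step = DagV1WorkflowStep(name="build", dependencies=["checkout"])
#   >>> step.to_dict()["name"]
#   'build'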
|
py | b409e28735b03765b974eef935145ddc6cdd99e0 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from keystone.common import pemutils
from keystone import tests
from six import moves
# List of 2-tuples, (pem_type, pem_header)
headers = pemutils.PEM_TYPE_TO_HEADER.items()
def make_data(size, offset=0):
return ''.join([chr(x % 255) for x in moves.range(offset, size + offset)])
def make_base64_from_data(data):
return base64.b64encode(data)
def wrap_base64(base64_text):
wrapped_text = '\n'.join([base64_text[x:x + 64]
for x in moves.range(0, len(base64_text), 64)])
wrapped_text += '\n'
return wrapped_text
def make_pem(header, data):
base64_text = make_base64_from_data(data)
wrapped_text = wrap_base64(base64_text)
result = '-----BEGIN %s-----\n' % header
result += wrapped_text
result += '-----END %s-----\n' % header
return result
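# For reference, make_pem() produces text of the form consumed by the parse
# tests below (base64 payload wrapped at 64 characters per line):
#
#   -----BEGIN CERTIFICATE-----
#   <base64 data, wrapped at 64 columns>
#   -----END CERTIFICATE-----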
class PEM(object):
"""PEM text and it's associated data broken out, used for testing.
"""
def __init__(self, pem_header='CERTIFICATE', pem_type='cert',
data_size=70, data_offset=0):
self.pem_header = pem_header
self.pem_type = pem_type
self.data_size = data_size
self.data_offset = data_offset
self.data = make_data(self.data_size, self.data_offset)
self.base64_text = make_base64_from_data(self.data)
self.wrapped_base64 = wrap_base64(self.base64_text)
self.pem_text = make_pem(self.pem_header, self.data)
class TestPEMParseResult(tests.TestCase):
def test_pem_types(self):
for pem_type in pemutils.pem_types:
pem_header = pemutils.PEM_TYPE_TO_HEADER[pem_type]
r = pemutils.PEMParseResult(pem_type=pem_type)
self.assertEqual(pem_type, r.pem_type)
self.assertEqual(pem_header, r.pem_header)
pem_type = 'xxx'
self.assertRaises(ValueError,
pemutils.PEMParseResult, pem_type=pem_type)
def test_pem_headers(self):
for pem_header in pemutils.pem_headers:
pem_type = pemutils.PEM_HEADER_TO_TYPE[pem_header]
r = pemutils.PEMParseResult(pem_header=pem_header)
self.assertEqual(pem_type, r.pem_type)
self.assertEqual(pem_header, r.pem_header)
pem_header = 'xxx'
self.assertRaises(ValueError,
pemutils.PEMParseResult, pem_header=pem_header)
class TestPEMParse(tests.TestCase):
def test_parse_none(self):
text = ''
text += 'bla bla\n'
text += 'yada yada yada\n'
text += 'burfl blatz bingo\n'
parse_results = pemutils.parse_pem(text)
self.assertEqual(len(parse_results), 0)
self.assertEqual(pemutils.is_pem(text), False)
def test_parse_invalid(self):
p = PEM(pem_type='xxx',
pem_header='XXX')
text = p.pem_text
self.assertRaises(ValueError,
pemutils.parse_pem, text)
def test_parse_one(self):
data_size = 70
count = len(headers)
pems = []
for i in moves.range(count):
pems.append(PEM(pem_type=headers[i][0],
pem_header=headers[i][1],
data_size=data_size + i,
data_offset=i))
for i in moves.range(count):
p = pems[i]
text = p.pem_text
parse_results = pemutils.parse_pem(text)
self.assertEqual(len(parse_results), 1)
r = parse_results[0]
self.assertEqual(p.pem_type, r.pem_type)
self.assertEqual(p.pem_header, r.pem_header)
self.assertEqual(p.pem_text,
text[r.pem_start:r.pem_end])
self.assertEqual(p.wrapped_base64,
text[r.base64_start:r.base64_end])
self.assertEqual(p.data, r.binary_data)
def test_parse_one_embedded(self):
p = PEM(data_offset=0)
text = ''
text += 'bla bla\n'
text += 'yada yada yada\n'
text += p.pem_text
text += 'burfl blatz bingo\n'
parse_results = pemutils.parse_pem(text)
self.assertEqual(len(parse_results), 1)
r = parse_results[0]
self.assertEqual(p.pem_type, r.pem_type)
self.assertEqual(p.pem_header, r.pem_header)
self.assertEqual(p.pem_text,
text[r.pem_start:r.pem_end])
self.assertEqual(p.wrapped_base64,
text[r.base64_start: r.base64_end])
self.assertEqual(p.data, r.binary_data)
def test_parse_multple(self):
data_size = 70
count = len(headers)
pems = []
text = ''
for i in moves.range(count):
pems.append(PEM(pem_type=headers[i][0],
pem_header=headers[i][1],
data_size=data_size + i,
data_offset=i))
for i in moves.range(count):
text += pems[i].pem_text
parse_results = pemutils.parse_pem(text)
self.assertEqual(len(parse_results), count)
for i in moves.range(count):
r = parse_results[i]
p = pems[i]
self.assertEqual(p.pem_type, r.pem_type)
self.assertEqual(p.pem_header, r.pem_header)
self.assertEqual(p.pem_text,
text[r.pem_start:r.pem_end])
self.assertEqual(p.wrapped_base64,
text[r.base64_start: r.base64_end])
self.assertEqual(p.data, r.binary_data)
def test_parse_multple_find_specific(self):
data_size = 70
count = len(headers)
pems = []
text = ''
for i in moves.range(count):
pems.append(PEM(pem_type=headers[i][0],
pem_header=headers[i][1],
data_size=data_size + i,
data_offset=i))
for i in moves.range(count):
text += pems[i].pem_text
for i in moves.range(count):
parse_results = pemutils.parse_pem(text, pem_type=headers[i][0])
self.assertEqual(len(parse_results), 1)
r = parse_results[0]
p = pems[i]
self.assertEqual(p.pem_type, r.pem_type)
self.assertEqual(p.pem_header, r.pem_header)
self.assertEqual(p.pem_text,
text[r.pem_start:r.pem_end])
self.assertEqual(p.wrapped_base64,
text[r.base64_start:r.base64_end])
self.assertEqual(p.data, r.binary_data)
def test_parse_multple_embedded(self):
data_size = 75
count = len(headers)
pems = []
text = ''
for i in moves.range(count):
pems.append(PEM(pem_type=headers[i][0],
pem_header=headers[i][1],
data_size=data_size + i,
data_offset=i))
for i in moves.range(count):
text += 'bla bla\n'
text += 'yada yada yada\n'
text += pems[i].pem_text
text += 'burfl blatz bingo\n'
parse_results = pemutils.parse_pem(text)
self.assertEqual(len(parse_results), count)
for i in moves.range(count):
r = parse_results[i]
p = pems[i]
self.assertEqual(p.pem_type, r.pem_type)
self.assertEqual(p.pem_header, r.pem_header)
self.assertEqual(p.pem_text,
text[r.pem_start:r.pem_end])
self.assertEqual(p.wrapped_base64,
text[r.base64_start:r.base64_end])
self.assertEqual(p.data, r.binary_data)
def test_get_pem_data_none(self):
text = ''
text += 'bla bla\n'
text += 'yada yada yada\n'
text += 'burfl blatz bingo\n'
data = pemutils.get_pem_data(text)
self.assertIsNone(data)
def test_get_pem_data_invalid(self):
p = PEM(pem_type='xxx',
pem_header='XXX')
text = p.pem_text
self.assertRaises(ValueError,
pemutils.get_pem_data, text)
def test_get_pem_data(self):
data_size = 70
count = len(headers)
pems = []
for i in moves.range(count):
pems.append(PEM(pem_type=headers[i][0],
pem_header=headers[i][1],
data_size=data_size + i,
data_offset=i))
for i in moves.range(count):
p = pems[i]
text = p.pem_text
data = pemutils.get_pem_data(text, p.pem_type)
self.assertEqual(p.data, data)
def test_is_pem(self):
data_size = 70
count = len(headers)
pems = []
for i in moves.range(count):
pems.append(PEM(pem_type=headers[i][0],
pem_header=headers[i][1],
data_size=data_size + i,
data_offset=i))
for i in moves.range(count):
p = pems[i]
text = p.pem_text
self.assertTrue(pemutils.is_pem(text, pem_type=p.pem_type))
self.assertFalse(pemutils.is_pem(text,
pem_type=p.pem_type + 'xxx'))
def test_base64_to_pem(self):
data_size = 70
count = len(headers)
pems = []
for i in moves.range(count):
pems.append(PEM(pem_type=headers[i][0],
pem_header=headers[i][1],
data_size=data_size + i,
data_offset=i))
for i in moves.range(count):
p = pems[i]
pem = pemutils.base64_to_pem(p.base64_text, p.pem_type)
self.assertEqual(pemutils.get_pem_data(pem, p.pem_type), p.data)
def test_binary_to_pem(self):
data_size = 70
count = len(headers)
pems = []
for i in moves.range(count):
pems.append(PEM(pem_type=headers[i][0],
pem_header=headers[i][1],
data_size=data_size + i,
data_offset=i))
for i in moves.range(count):
p = pems[i]
pem = pemutils.binary_to_pem(p.data, p.pem_type)
self.assertEqual(pemutils.get_pem_data(pem, p.pem_type), p.data)
|
py | b409e29efcf0eccd5294ef30053421c1ba3289a4 | #!/bin/python
"""
This module holds an Inmoov wrist
Authors:
Brett Creeley
Matty Baba Allos
"""
from Servo import Servo
class Wrist(object):
""" This class represents an Inmoov Wrist """
"""
Todo: Pull apart Inmoov's forearm to find out servo models for a Wrist.
- These values are just copied from the HS-805BB Servo.
"""
def __init__(self, servo):
""" Set the Servo for this Wrist """
if servo is None:
raise Exception("Could not initiate wrist")
self.servo = servo
def initialize(self):
self.servo.initialize()
def rotate(self, degree):
""" Rotate this Wrist the desired degree """
self.servo.rotate(degree)
def off(self):
""" Turn off all fingers off"""
self.servo.off()
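# Usage sketch (assumes a pre-built `servo` object; the Servo constructor
# arguments are project-specific and not shown here):
#
#   >>> wrist = Wrist(servo)
#   >>> wrist.initialize()
#   >>> wrist.rotate(90)
#   >>> wrist.off()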
|
py | b409e2bebf124eca303267812654fec1924056c9 | #!/home/emily/anaconda2/bin/python
from cec2013.cec2013 import *
import numpy as np
import sys
import pandas as pd
import glob
import os
# This script expects two command line arguments:
# - a glob-style pattern indicating which directories to analyze
# (remember to put quotation marks around it)
# - the name of a file to store data in
# It will extract the path (x, y, and z coordinates) taken by the fittest
# lineage from the phylogeny_5000.csv file from each directory matched by
# the glob pattern provided in the first command-line argument.
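# Usage sketch (hypothetical script name and paths):
#
#   python extract_lineage.py "runs/problem_*_seed_*" fittest_lineages.csv
#
# Each matched directory is expected to contain run.log and
# pop_5000/phylogeny_5000.csv, as checked in main() below.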
def main():
glob_pattern = sys.argv[1]
outfilename = sys.argv[2]
all_data = pd.DataFrame()
# print(glob_pattern, glob.glob(glob_pattern))
problem_map = {0:4, 1:5, 2:6, 6:12}
for dirname in glob.glob(glob_pattern):
print(dirname)
run_log = dirname + "/run.log"
filename = dirname + "/pop_5000/phylogeny_5000.csv"
if not (os.path.exists(run_log) and os.path.exists(filename)):
print("skipped")
continue
local_data = {}
with open(run_log) as run_log_file:
for line in run_log_file:
if line.startswith("Doing initial"):
break
elif not line.startswith("set"):
continue
line = line.split()
local_data[line[1]] = line[2]
# Create function
f = CEC2013(problem_map[int(local_data["PROBLEM"])])
assert(f.get_dimension() == 2)
df = pd.read_csv(filename, index_col="id")
df.rename(str.strip, axis="columns", inplace=True)
# print(df.columns.values)
genotypes = [i.strip("[] ").split() for i in df["info"]]
genotypes = map(lambda x: [float(x[0]), float(x[1])], genotypes)
df["fitness"] = [f.evaluate(np.array(x)) for x in genotypes]
genotypes = zip(*genotypes)
df["x"] = genotypes[0]
df["y"] = genotypes[1]
paths = []
ids = []
next_gen = set([])
curr_id = int(df[df["num_orgs"] > 0]["fitness"].idxmax())
path = []
fits = []
x_magnitude = 0
y_magnitude = 0
total_magnitude = 0
deleterious_steps = 0
ben_steps = 0
neutral_steps = 0
while (curr_id > 1):
# print(df.loc[curr_id, :])
fits.append(df.loc[curr_id, "fitness"])
path.append(" ".join(["{:.2f}".format(i) for i in [df.loc[curr_id, "x"], df.loc[curr_id, "y"], df.loc[curr_id, "fitness"]]]))
if (df.loc[curr_id, "parent_id"] == 1):
break
x_magnitude += abs(df.loc[df.loc[curr_id, "parent_id"], "x"] - df.loc[curr_id, "x"])
y_magnitude += abs(df.loc[df.loc[curr_id, "parent_id"], "y"] - df.loc[curr_id, "y"])
total_magnitude += abs(df.loc[df.loc[curr_id, "parent_id"], "x"] - df.loc[curr_id, "x"]) + abs(df.loc[df.loc[curr_id, "parent_id"], "y"] - df.loc[curr_id, "y"])
if (df.loc[curr_id, "fitness"] > df.loc[df.loc[curr_id, "parent_id"], "fitness"]):
ben_steps += 1
elif (df.loc[curr_id, "fitness"] == df.loc[df.loc[curr_id, "parent_id"], "fitness"]):
neutral_steps += 1
else:
deleterious_steps += 1
curr_id = df.loc[curr_id, "parent_id"]
paths.append(",".join(path))
ids.append(curr_id)
fits = pd.DataFrame(fits)
temp = pd.DataFrame({"path": paths, "lin_id": ids})
for k in local_data:
temp[k] = local_data[k]
temp["phenotypic_volatility"] = fits.var()
temp["rolling_mean_1000_volatility"] = fits.rolling(1000).mean().var()
temp["rolling_mean_500_volatility"] = fits.rolling(500).mean().var()
temp["rolling_mean_100_volatility"] = fits.rolling(100).mean().var()
temp["rolling_mean_50_volatility"] = fits.rolling(50).mean().var()
temp["x_magnitude"] = x_magnitude
temp["y_magnitude"] = y_magnitude
temp["total_magnitude"] = total_magnitude
all_data = pd.concat([all_data, temp])
all_data.to_csv(outfilename,index=False)
#print(df)
# Evaluate :-)
# x = np.ones(2)
# value = f.evaluate(x)
if __name__ == "__main__":
main()
|
py | b409e2cbed2d4267399645f11a482253c6469464 | """
The ledger_entry method returns a single ledger
object from the XRP Ledger in its raw format.
See ledger format for information on the
different types of objects you can retrieve.
`See ledger entry <https://xrpl.org/ledger_entry.html>`_
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
from xrpl.models.base_model import BaseModel
from xrpl.models.requests.request import Request, RequestMethod
from xrpl.models.required import REQUIRED
from xrpl.models.utils import require_kwargs_on_init
@require_kwargs_on_init
@dataclass(frozen=True)
class DepositPreauth(BaseModel):
"""
Required fields for requesting a DepositPreauth if not querying by
object ID.
"""
owner: str = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
authorized: str = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
@require_kwargs_on_init
@dataclass(frozen=True)
class Directory(BaseModel):
"""
Required fields for requesting a DirectoryNode if not querying by
object ID.
"""
owner: str = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
dir_root: str = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
sub_index: Optional[int] = None
@require_kwargs_on_init
@dataclass(frozen=True)
class Escrow(BaseModel):
"""
Required fields for requesting a Escrow if not querying by
object ID.
"""
owner: str = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
seq: int = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
@require_kwargs_on_init
@dataclass(frozen=True)
class Offer(BaseModel):
"""
Required fields for requesting a Offer if not querying by
object ID.
"""
account: str = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
seq: int = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
@require_kwargs_on_init
@dataclass(frozen=True)
class RippleState(BaseModel):
"""Required fields for requesting a RippleState."""
accounts: List[str] = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
currency: str = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
@require_kwargs_on_init
@dataclass(frozen=True)
class Ticket(BaseModel):
"""
Required fields for requesting a Ticket, if not querying by
object ID.
"""
owner: str = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
ticket_sequence: int = REQUIRED # type: ignore
"""
This field is required.
:meta hide-value:
"""
@require_kwargs_on_init
@dataclass(frozen=True)
class LedgerEntry(Request):
"""
The ledger_entry method returns a single ledger
object from the XRP Ledger in its raw format.
See ledger format for information on the
different types of objects you can retrieve.
`See ledger entry <https://xrpl.org/ledger_entry.html>`_
"""
method: RequestMethod = field(default=RequestMethod.LEDGER_ENTRY, init=False)
index: Optional[str] = None
account_root: Optional[str] = None
check: Optional[str] = None
deposit_preauth: Optional[Union[str, DepositPreauth]] = None
directory: Optional[Union[str, Directory]] = None
escrow: Optional[Union[str, Escrow]] = None
offer: Optional[Union[str, Offer]] = None
payment_channel: Optional[str] = None
ripple_state: Optional[RippleState] = None
ticket: Optional[Union[str, Ticket]] = None
binary: bool = False
ledger_hash: Optional[str] = None
ledger_index: Optional[Union[str, int]] = None
def _get_errors(self: LedgerEntry) -> Dict[str, str]:
errors = super()._get_errors()
query_params = [
param
for param in [
self.index,
self.account_root,
self.directory,
self.offer,
self.ripple_state,
self.check,
self.escrow,
self.payment_channel,
self.deposit_preauth,
self.ticket,
]
if param is not None
]
if len(query_params) != 1:
errors["LedgerEntry"] = "Must choose exactly one data to query"
return errors
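# Usage sketch (placeholder account address): exactly one of the query fields
# must be supplied, e.g. an AccountRoot entry on the latest validated ledger.
#
#   >>> req = LedgerEntry(account_root="rExampleAddress", ledger_index="validated")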
|
py | b409e2d02217fb6fc23f3744ee5a132140a164fb |
import os.path
from flask import Flask, request
app = Flask(__name__)
@app.route("/path1")
def path_injection():
path = request.args.get('path', '')
f = open(os.path.join(os.getcwd(), path))
@app.route("/path2")
def path_injection_normalized():
# Normalized, but not checked
path = request.args.get('path', '')
npath = os.path.normpath(os.path.join(os.getcwd(), path))
f = open(npath)
SAFE = "/tmp/scratch_area/"
@app.route("/path3")
def safe_path():
# Normalized, but check doesn't reach open().
path = request.args.get('path', '')
npath = os.path.normpath(os.path.join(os.getcwd(), path))
if npath.startswith(SAFE):
pass
f = open(npath)
@app.route("/path4")
def safe_path_checked():
# Normalized, and checked properly
path = request.args.get('path', '')
npath = os.path.normpath(os.path.join(os.getcwd(), path))
if npath.startswith(SAFE):
f = open(npath)
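# Hypothetical variant (sketch, not one of the original samples): resolve
# symlinks with os.path.realpath and require the result to stay under the
# allowed prefix before opening the file; this also rejects "../" sequences
# in attacker-controlled input.
@app.route("/path5")
def safe_path_realpath():
    path = request.args.get('path', '')
    rpath = os.path.realpath(os.path.join(SAFE, path))
    if rpath.startswith(os.path.realpath(SAFE) + os.sep):
        return open(rpath).read()
    return "rejected"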
|
py | b409e2f2b76e60957049f507116cbd06a8b3fb17 | #!/usr/bin/python3
import tga
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
# new 100x100 black image
data = [
[black for x in range(100)] for y in range(100)
]
data[10][41] = red # set pixel
data = data[::-1] # flip vertically
# save as TGA
image = tga.Image(data)
image.save("lesson00.tga")
print("DONE") |
py | b409e3711c5c54ab18845780acbed5dadd59f8cb | import math
import os
import random
import torch
from ncc import LOGGER
from ncc import tasks
from ncc.data import iterators
from ncc.trainers.ncc_trainers import Trainer
from ncc.utils import checkpoint_utils, distributed_utils
from ncc.utils import set_seed
from ncc.utils import utils
from ncc.utils.file_ops.yaml_io import load_yaml
from ncc.utils.logging import meters
from ncc.utils.logging import metrics, progress_bar
from ncc.utils.path_manager import PathManager
@metrics.aggregate('train')
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args['distributed_training']['fix_batches_to_gpus'],
shuffle=(epoch_itr.next_epoch_idx > args['dataset']['curriculum']),
)
update_freq = (
args['optimization']['update_freq'][epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args['optimization']['update_freq'])
else args['optimization']['update_freq'][-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
progress = progress_bar.progress_bar(
itr,
log_format=args['common']['log_format'],
log_interval=args['common']['log_interval'],
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args['common']['tensorboard_logdir'] if distributed_utils.is_master(args) else None
),
default_log_format=('tqdm' if not args['common']['no_progress_bar'] else 'simple'),
)
# task specific setup per epoch
task.begin_epoch(epoch_itr.epoch, trainer.get_model())
valid_subsets = args['dataset']['valid_subset'].split(',')
max_update = args['optimization']['max_update'] or math.inf
for samples in progress:
with metrics.aggregate('train_inner'):
log_output = trainer.train_step(samples)
if log_output is None: # OOM, overflow, ...
continue
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args['common']['log_interval'] == 0:
stats = get_training_stats(metrics.get_smoothed_values('train_inner'))
progress.log(stats, tag='train_inner', step=num_updates)
# reset epoch-level meters
metrics.reset_meters('train_inner')
if (
not args['dataset']['disable_validation']
and args['checkpoint']['save_interval_updates'] > 0
and num_updates % args['checkpoint']['save_interval_updates'] == 0
and num_updates > 0
):
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
if num_updates >= max_update:
break
# log end-of-epoch stats
stats = get_training_stats(metrics.get_smoothed_values('train'))
progress.print(stats, tag='train', step=num_updates)
# reset epoch-level meters
metrics.reset_meters('train')
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args['dataset']['fixed_validation_seed'] is not None:
# set fixed seed for every validation
set_seed.set_torch_seed(args['dataset']['fixed_validation_seed'])
valid_losses = []
for subset in subsets:
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=task.dataset(subset),
max_tokens=args['dataset']['max_tokens_valid'],
max_sentences=args['dataset']['max_sentences_valid'],
max_positions=utils.resolve_max_positions(
task.max_positions(),
trainer.get_model().max_positions(),
),
ignore_invalid_inputs=args['dataset']['skip_invalid_size_inputs_valid_test'],
required_batch_size_multiple=args['dataset']['required_batch_size_multiple'],
seed=args['common']['seed'],
num_shards=args['distributed_training']['distributed_world_size'],
shard_id=args['distributed_training']['distributed_rank'],
num_workers=args['dataset']['num_workers'],
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args['common']['log_format'],
log_interval=args['common']['log_interval'],
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args['common']['tensorboard_logdir'] if distributed_utils.is_master(args) else None
),
default_log_format=('tqdm' if not args['common']['no_progress_bar'] else 'simple'),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
log_output = trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args['checkpoint']['best_checkpoint_metric']])
return valid_losses
def get_valid_stats(args, trainer, stats):
if 'nll_loss' in stats and 'ppl' not in stats:
stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
stats['num_updates'] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, 'best'):
key = 'best_{0}'.format(args['checkpoint']['best_checkpoint_metric'])
best_function = max if args['checkpoint']['maximize_best_checkpoint_metric'] else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[args['checkpoint']['best_checkpoint_metric']],
)
return stats
def get_training_stats(stats):
if 'nll_loss' in stats and 'ppl' not in stats:
stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
stats['wall'] = round(metrics.get_meter('default', 'wall').elapsed_time, 0)
return stats
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args['checkpoint']['patience'] <= 0:
return False
def is_better(a, b):
return a > b if args['checkpoint']['maximize_best_checkpoint_metric'] else a < b
prev_best = getattr(should_stop_early, 'best', None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args['checkpoint']['patience']:
LOGGER.info('early stop since valid performance hasn\'t improved for last {} runs'.format(
args['checkpoint']['patience']))
return should_stop_early.num_runs >= args['checkpoint']['patience']
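# Note: should_stop_early keeps its state ("best", "num_runs") as attributes on
# the function object itself, so the patience counter accumulates across
# validation rounds within a single run. Sketch of the call pattern
# (hypothetical losses, minimizing metric):
#
#   >>> should_stop_early(args, 0.90)   # first loss: best=0.90, returns False
#   >>> should_stop_early(args, 0.95)   # worse: num_runs=1, False until patience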
def single_main(args, init_distributed=False):
assert args['dataset']['max_tokens'] is not None or args['dataset']['max_sentences'] is not None, \
'Must specify batch size either with --max-tokens or --max-sentences'
metrics.reset()
# 0. Initialize CUDA and distributed training
if torch.cuda.is_available() and not args['common']['cpu']:
torch.cuda.set_device(args['distributed_training']['device_id'])
set_seed.set_seed(args['common']['seed'])
if init_distributed:
args['distributed_training']['distributed_rank'] = distributed_utils.distributed_init(args)
# Verify checkpoint directory
if distributed_utils.is_master(args):
save_dir = args['checkpoint']['save_dir']
checkpoint_utils.verify_checkpoint_directory(save_dir)
PathManager.rm(os.path.join(save_dir, '*.pt')) # this code will remove pre-trained models
# 1. Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# 2. Load valid dataset (we load training data below, based on the latest checkpoint)
task.load_dataset(args['dataset']['valid_subset'], combine=False, epoch=1)
# 3. Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
LOGGER.info(model)
LOGGER.info('model {}, criterion {}'.format(args['model']['arch'], criterion.__class__.__name__))
LOGGER.info('num. model params: {} (num. trained: {})'.format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
))
# 4. Build trainer
trainer = Trainer(args, task, model, criterion)
# trainer = CSN_Trainer(args, task, model, criterion)
LOGGER.info('training on {} GPUs'.format(args['distributed_training']['distributed_world_size']))
LOGGER.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(
args['dataset']['max_tokens'],
args['dataset']['max_sentences'],
))
# 5. Load the latest checkpoint if one is available and restore the corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer, combine=False)
# 6. Train until the learning rate gets too small
max_epoch = args['optimization']['max_epoch'] or math.inf
max_update = args['optimization']['max_update'] or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
valid_subsets = args['dataset']['valid_subset'].split(',')
while (
lr > args['optimization']['min_lr']
and epoch_itr.next_epoch_idx <= max_epoch
and trainer.get_num_updates() < max_update
):
# train for one epoch
train(args, trainer, task, epoch_itr)
if not args['dataset']['disable_validation'] and epoch_itr.epoch % args['dataset']['validate_interval'] == 0:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
else:
valid_losses = [None]
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch + 1, valid_losses[0])
# save checkpoint
if epoch_itr.epoch % args['checkpoint']['save_interval'] == 0:
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
# early stop
if should_stop_early(args, valid_losses[0]):
LOGGER.info('early stop since valid performance hasn\'t improved for last {} runs'.format(
args['checkpoint']['patience']))
break
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
combine=False, # TODO to be checked
# sharded data: get train iterator for next epoch
load_dataset=(os.pathsep in args['task']['data']),
)
train_meter.stop()
LOGGER.info('done training in {:.1f} seconds'.format(train_meter.sum))
def distributed_main(i, args, start_rank=0):
args['distributed_training']['device_id'] = i
if args['distributed_training']['distributed_rank'] is None: # torch.multiprocessing.spawn
args['distributed_training']['distributed_rank'] = start_rank + i
single_main(args, init_distributed=True)
def cli_main():
import argparse
    parser = argparse.ArgumentParser(
        description="Train a model with the configuration given by --yaml_file")
parser.add_argument(
"--yaml_file", "-f", type=str, help="load {yaml_file}.yml for train",
default='config/csn/all'
)
args = parser.parse_args()
yaml_file = os.path.join(os.path.dirname(__file__), '{}.yml'.format(args.yaml_file))
LOGGER.info('Load arguments in {}'.format(yaml_file))
args = load_yaml(yaml_file)
LOGGER.info(args)
if args['distributed_training']['distributed_init_method'] is None:
distributed_utils.infer_init_method(args)
if args['distributed_training']['distributed_init_method'] is not None:
# distributed training
if torch.cuda.device_count() > 1 and not args['distributed_training']['distributed_no_spawn']:
start_rank = args['distributed_training']['distributed_rank']
args['distributed_training']['distributed_rank'] = None # assign automatically
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, start_rank),
nprocs=torch.cuda.device_count(),
)
else:
distributed_main(args['distributed_training']['device_id'], args)
elif args['distributed_training']['distributed_world_size'] > 1:
# fallback for single node with multiple GPUs
assert args['distributed_training']['distributed_world_size'] <= torch.cuda.device_count()
port = random.randint(10000, 20000)
args['distributed_training']['distributed_init_method'] = 'tcp://localhost:{port}'.format(port=port)
args['distributed_training']['distributed_rank'] = None # set based on device id
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args,),
nprocs=args['distributed_training']['distributed_world_size'],
)
else:
LOGGER.info('single GPU training...')
single_main(args)
if __name__ == '__main__':
cli_main()
|
py | b409e37b23a28b4b226a769de057f5162a250c45 |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'leftTYPECASTrightUMINUSrightUNOTleftMASMENOSleftPOTENCIAleftPORDIVRESIDUOleftANDORSIMBOLOOR2SIMBOLOORSIMBOLOAND2leftDESPLAZAMIENTOIZQUIERDADESPLAZAMIENTODERECHAABS ACOS ACOSD ACOSH ADD ALL ALTER AND ANY AS ASC ASIN ASIND ASINH ATAN ATAN2 ATAN2D ATAND ATANH AUTO_INCREMENT AVG BEGIN BETWEEN BIGINT BOOLEAN BOTH BY CADENA CASE CBRT CEIL CEILING CHAR CHARACTER CHECK COLOCHO COLUMN COLUMNS COMA CONCAT CONSTRAINT CONT CONVERT CORCHETEDER CORCHETEIZQ COS COSD COSH COT COTD CREATE CURRENT_USER DATABASE DATABASES DATE DAY DECIMAL DECIMALTOKEN DECLARE DECODE DEFAULT DEGREES DELETE DESC DESPLAZAMIENTODERECHA DESPLAZAMIENTOIZQUIERDA DIFERENTE DISTINCT DIV DIV DOSPUNTOS DOUBLE DROP ELSE ENCODE END ENTERO ENUM ENUM ESCAPE ETIQUETA EXCEPT EXISTS EXP FACTORIAL FALSE FIRST FLOOR FOR FOREIGN FROM FULL FUNCTION GCD GET_BYTE GREATEST GROUP HAVING HOUR ID IF IGUAL IGUALIGUAL ILIKE IN INHERITS INNER INSERT INTEGER INTERSECT INTERVAL INTO IS ISNULL JOIN KEY LAST LCM LEADING LEAST LEFT LENGTH LIKE LIMIT LN LOG LOG10 MAS MAX MAYOR MAYORIGUAL MD5 MENOR MENORIGUAL MENOS MIN MINUTE MIN_SCALE MOD MODE MONEY MONTH NATURAL NOT NOTEQUAL NOTNULL NOW NULL NULLS NUMERAL NUMERIC OF OFFSET ON ONLY OR ORDER OUTER OWNER PARENTESISDERECHA PARENTESISIZQUIERDA PI POR POTENCIA POWER PRECISION PRIMARY PUNTO PUNTOYCOMA RADIANS RANDOM REAL REFERENCES RENAME REPLACE RESIDUO RETURNING RETURNS RIGHT ROUND SCALE SECOND SELECT SESSION_USER SET SETSEED SET_BYTE SHA256 SHOW SIGN SIMBOLOAND SIMBOLOAND2 SIMBOLOOR SIMBOLOOR2 SIN SIND SINH SMALLINT SOME SQRT SUBSTR SUBSTRING SUM SYMMETRIC TABLE TABLES TAN TAND TANH TEXT THEN TIME TIMESTAMP TO TRAILING TRIM TRIM_SCALE TRUE TRUNC TYPE TYPECAST UNION UNIQUE UNKNOWN UPDATE UPPER USE USING VALUES VARCHAR VARYING VIEW WHEN WHERE WIDTH_BUCKET YEARinicio : queriesqueries : queries queryqueries : queryquery : mostrarBD\n | crearBD\n | alterBD\n | dropBD\n | useBD\n | operacion\n | insertinBD\n | updateinBD\n | deleteinBD\n | createTable\n | inheritsBD\n | dropTable\n | alterTable\n | variantesAt\n | contAdd\n | contDrop\n | contAlter\n | listaid\n | tipoAlter \n | selectData\n crearBD : CREATE DATABASE ID PUNTOYCOMAcrearBD : CREATE DATABASE IF NOT EXISTS ID PUNTOYCOMAcrearBD : CREATE OR REPLACE DATABASE ID PUNTOYCOMAcrearBD : CREATE OR REPLACE DATABASE IF NOT EXISTS ID PUNTOYCOMAcrearBD : CREATE DATABASE ID parametrosCrearBD PUNTOYCOMAcrearBD : CREATE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMAcrearBD : CREATE OR REPLACE DATABASE ID parametrosCrearBD PUNTOYCOMAcrearBD : CREATE OR REPLACE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMAparametrosCrearBD : parametrosCrearBD parametroCrearBDparametrosCrearBD : parametroCrearBDparametroCrearBD : OWNER IGUAL final\n | MODE IGUAL final\n useBD : USE ID PUNTOYCOMAmostrarBD : SHOW DATABASES PUNTOYCOMAalterBD : ALTER DATABASE ID RENAME TO ID PUNTOYCOMAalterBD : ALTER DATABASE ID OWNER TO parametroAlterUser PUNTOYCOMA parametroAlterUser : CURRENT_USER \n parametroAlterUser : SESSION_USER\n parametroAlterUser : final dropTable : DROP TABLE ID PUNTOYCOMA\n alterTable : ALTER TABLE ID variantesAt PUNTOYCOMA\n\n \n variantesAt : ADD contAdd\n | ALTER contAlter\n | DROP contDrop\n \n listaContAlter : listaContAlter COMA contAlter \n \n listaContAlter : contAlter\n \n contAlter : COLUMN ID SET NOT NULL \n | COLUMN ID TYPE tipo\n \n contAdd : COLUMN ID tipo \n | CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID\n | PRIMARY KEY PARENTESISIZQUIERDA ID 
PARENTESISDERECHA\n | CONSTRAINT ID FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA ID PARENTESISDERECHA\n | CONSTRAINT ID PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA\n | CONSTRAINT ID UNIQUE PARENTESISIZQUIERDA ID PARENTESISDERECHA\n \n contDrop : COLUMN ID \n | CONSTRAINT ID\n | PRIMARY KEY\n \n listaid : listaid COMA ID\n \n listaid : ID\n \n tipoAlter : ADD \n | DROP\n dropBD : DROP DATABASE ID PUNTOYCOMAdropBD : DROP DATABASE IF EXISTS ID PUNTOYCOMAoperacion : operacion MAS operacion\n | operacion MENOS operacion\n | operacion POR operacion\n | operacion DIV operacion\n | operacion RESIDUO operacion\n | operacion POTENCIA operacion\n | operacion AND operacion\n | operacion OR operacion\n | operacion SIMBOLOOR2 operacion\n | operacion SIMBOLOOR operacion\n | operacion SIMBOLOAND2 operacion\n | operacion DESPLAZAMIENTOIZQUIERDA operacion\n | operacion DESPLAZAMIENTODERECHA operacion\n | operacion IGUAL operacion\n | operacion IGUALIGUAL operacion\n | operacion NOTEQUAL operacion\n | operacion MAYORIGUAL operacion\n | operacion MENORIGUAL operacion\n | operacion MAYOR operacion\n | operacion MENOR operacion\n | operacion DIFERENTE operacion\n | PARENTESISIZQUIERDA operacion PARENTESISDERECHA \n operacion : MENOS ENTERO %prec UMINUSoperacion : MENOS DECIMAL %prec UMINUSoperacion : NOT operacion %prec UNOToperacion : finalfuncionBasica : ABS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | CBRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | CEIL PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | CEILING PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | DEGREES PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | DIV PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n | EXP PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | FACTORIAL PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | FLOOR PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | GCD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n | LCM PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n | LN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | LOG PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | MOD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n | PI PARENTESISIZQUIERDA PARENTESISDERECHA\n | POWER PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n | RADIANS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | ROUND PARENTESISIZQUIERDA operacion PARENTESISDERECHA \n | SIGN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | SQRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | TRIM_SCALE PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | TRUNC PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | WIDTH_BUCKET PARENTESISIZQUIERDA operacion COMA operacion COMA operacion COMA operacion PARENTESISDERECHA\n | RANDOM PARENTESISIZQUIERDA PARENTESISDERECHA\n | GREATEST PARENTESISIZQUIERDA select_list PARENTESISDERECHA\n | LEAST PARENTESISIZQUIERDA select_list PARENTESISDERECHA\n | NOW PARENTESISIZQUIERDA PARENTESISDERECHA\n \n \n | ACOS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | ACOSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | ASIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | ASIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA \n | ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | ATAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | ATAN2 PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n | ATAN2D PARENTESISIZQUIERDA operacion COMA operacion 
PARENTESISDERECHA\n \n\n | COS PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n\t\t\t | COSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | COT PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | COTD PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | SIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | SIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | TAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | TAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | SINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n\n\n\n | COSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | TANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | ASINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | ACOSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | ATANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | LENGTH PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | TRIM PARENTESISIZQUIERDA opcionTrim operacion FROM operacion PARENTESISDERECHA\n | GET_BYTE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n | MD5 PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | SET_BYTE PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n | SHA256 PARENTESISIZQUIERDA operacion PARENTESISDERECHA \n | SUBSTR PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n | CONVERT PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA\n | ENCODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n | DECODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA\n | AVG PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n | SUM PARENTESISIZQUIERDA operacion PARENTESISDERECHA\n funcionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion FOR operacion PARENTESISDERECHAfuncionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion PARENTESISDERECHAfuncionBasica : SUBSTRING PARENTESISIZQUIERDA operacion FOR operacion PARENTESISDERECHA opcionTrim : LEADING\n | TRAILING\n | BOTH\n final : DECIMALfinal : ENTEROfinal : IDfinal : ID PUNTO IDfinal : CADENAinsertinBD : INSERT INTO ID VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMAinsertinBD : INSERT INTO ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMAlistaParam : listaParam COMA finallistaParam : finalupdateinBD : UPDATE ID SET asignaciones WHERE asignaciones PUNTOYCOMAasignaciones : asignaciones COMA asignaasignaciones : asignaasigna : ID IGUAL operaciondeleteinBD : DELETE FROM ID PUNTOYCOMAdeleteinBD : DELETE FROM ID WHERE operacion PUNTOYCOMAinheritsBD : CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA INHERITS PARENTESISIZQUIERDA ID PARENTESISDERECHA PUNTOYCOMAcreateTable : CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA PUNTOYCOMAcreaColumnas : creaColumnas COMA ColumnacreaColumnas : ColumnaColumna : ID tipoColumna : ID tipo paramOpcionalColumna : UNIQUE PARENTESISIZQUIERDA listaParam PARENTESISDERECHAColumna : constraintcheckColumna : checkinColumnColumna : primaryKeyColumna : foreignKeyparamOpcional : paramOpcional paramopcparamOpcional : paramopcparamopc : DEFAULT final\n | NULL\n | NOT NULL\n | UNIQUE\n | PRIMARY KEY\n paramopc : constraintcheckparamopc : checkinColumnparamopc : CONSTRAINT ID UNIQUEcheckinColumn : CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHAconstraintcheck : CONSTRAINT ID CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHAprimaryKey : PRIMARY KEY PARENTESISIZQUIERDA listaParam 
PARENTESISDERECHAforeignKey : FOREIGN KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHAtipo : SMALLINT\n | INTEGER\n | BIGINT\n | DECIMAL\n | NUMERIC\n | REAL\n | DOUBLE PRECISION\n | MONEY\n | VARCHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n | CHARACTER VARYING PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n | CHARACTER PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n | CHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA\n | TEXT\n | BOOLEAN\n | TIMESTAMP\n | TIME\n | INTERVAL\n | DATE\n | YEAR\n | MONTH \n | DAY\n | HOUR \n | MINUTE\n | SECOND\n selectData : SELECT select_list FROM select_list WHERE search_condition opcionesSelect PUNTOYCOMA\n | SELECT POR FROM select_list WHERE search_condition opcionesSelect PUNTOYCOMA\n selectData : SELECT select_list FROM select_list WHERE search_condition PUNTOYCOMA\n | SELECT POR FROM select_list WHERE search_condition PUNTOYCOMA\n selectData : SELECT select_list FROM select_list PUNTOYCOMA\n | SELECT POR FROM select_list PUNTOYCOMA\n selectData : SELECT select_list PUNTOYCOMA\n opcionesSelect : opcionesSelect opcionSelect\n opcionesSelect : opcionSelect\n opcionSelect : LIMIT operacion\n | GROUP BY select_list\n | HAVING select_list\n | ORDER BY select_list \n opcionSelect : LIMIT operacion OFFSET operacion\n | ORDER BY select_list ordenamiento \n ordenamiento : ASC\n | DESC search_condition : NOT search_conditionsearch_condition : operacionsearch_condition : PARENTESISIZQUIERDA search_condition PARENTESISDERECHA select_list : select_list COMA operacion select_list : select_list COMA asignacion select_list : asignacionselect_list : operacion asignacion : operacion AS operacion asignacion : final finalfuncionBasica : operacion BETWEEN operacion funcionBasica : operacion LIKE CADENAfuncionBasica : operacion IN PARENTESISIZQUIERDA select_list PARENTESISDERECHA funcionBasica : operacion NOT BETWEEN operacionfuncionBasica : operacion BETWEEN SYMMETRIC operacion funcionBasica : operacion NOT BETWEEN SYMMETRIC operacion funcionBasica : operacion IS DISTINCT FROM operacion funcionBasica : operacion IS NOT DISTINCT FROM operacion'
_lr_action_items = {'SHOW':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[24,24,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'CREATE':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[25,25,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'ALTER':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,137,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[28,28,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,201,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'DROP':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,137,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277
,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[29,29,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,204,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'USE':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[30,30,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'PARENTESISIZQUIERDA':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,27,29,32,33,34,35,39,41,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,75,76,79,83,88,89,94,98,99,100,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,134,135,142,143,144,145,146,147,151,153,156,157,158,159,160,161,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,184,185,186,187,188,189,191,205,207,208,213,214,216,217,219,222,225,226,234,244,250,255,260,264,270,274,275,276,277,281,289,290,291,297,304,305,307,308,312,314,316,318,320,332,333,336,340,341,343,345,346,347,349,351,353,355,362,363,372,376,379,382,384,386,387,395,396,399,409,410,411,414,],[32,32,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,32,-65,32,-162,-161,-93,-64,99,32,-165,-2,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,-92,-163,-46,-47,-90,-91,-45,-59,32,180,181,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,198,-164,-59,-60,-61,-36,-89,209,181,-52,-201,-202,-203,-204,-205,-206,-208,218,220,221,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,227,32,-231,32,32,32,-24,-66,-43,257,-174,32,-51,-207,266,-53,271,272,-28,287,289,-44,32,-50,-55,316,-229,316,-230,-26,32,338,339,-67,-175,-209,-211,-212,-58,316,316,-25,-30,-177,370,372,-38,-39,377,-170,-210,-54,-57,-227,32,32,-228,-29,32,-166,-225,32,32,-226,-27,404,32,-31,-176,413,-167,-56,]),'MENOS':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,27,29,32,33,34,35,39,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,75,76,79,83,88,89,90,94,98,99,101,102,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,14
5,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,179,185,186,187,188,189,191,205,207,213,214,216,217,222,229,231,233,234,255,260,263,264,270,274,275,276,277,281,289,297,301,304,305,307,308,312,314,315,316,318,320,332,337,340,341,345,346,347,349,351,353,355,358,360,362,363,372,376,379,381,382,384,386,387,391,396,399,405,409,411,414,],[31,31,-3,-4,-5,-6,-7,-8,49,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,31,-65,31,-162,-161,-93,-64,31,-165,-2,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,49,-163,-46,-47,-90,-91,49,-45,-59,31,-61,-60,49,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,49,49,49,49,49,49,49,49,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,49,31,-231,31,31,31,-24,-66,-43,-174,31,-51,-207,-53,49,-93,49,-28,-44,31,49,-50,-55,31,-229,31,-230,-26,31,-67,49,-175,-209,-211,-212,-58,31,49,31,-25,-30,-177,49,-38,-39,-170,-210,-54,-57,-227,31,31,49,49,-228,-29,31,-166,-225,49,31,31,-226,-27,49,31,-31,49,-176,-167,-56,]),'NOT':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,27,29,32,33,34,35,39,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,75,76,79,83,88,89,94,98,99,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,132,135,142,143,144,145,146,153,154,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,185,186,187,188,189,191,205,207,213,214,216,217,222,234,240,255,260,264,270,274,275,276,277,281,284,289,297,304,305,307,308,312,314,316,318,320,322,323,325,327,329,330,332,340,341,345,346,347,349,351,353,355,362,363,365,366,367,368,372,373,376,379,382,384,386,387,389,396,399,401,409,411,414,],[27,27,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,27,-65,27,-162,-161,-93,-64,27,-165,-2,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,27,-92,-163,-46,-47,-90,-91,-45,-59,27,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,196,-164,-59,-60,-61,-36,-89,-52,215,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,27,-231,27,27,27,-24,-66,-43,-174,27,-51,-207,-53,-28,283,-44,27,-50,-55,314,-229,314,-230,-26,326,27,-67,-175,-209,-211,-212,-58,314,314,-25,-30,326,-188,-190,-192,-194,-195,-177,-38,-39,-170,-210,-54,-57,-227,27,27,-228,-29,-187,-189,-191,-193,27,-197,-166,-225,27,27,-226,-27,-196,27,-31,-198,-176,-167,-56,]),'INSERT':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[36,36,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210
,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'UPDATE':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[37,37,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'DELETE':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[38,38,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'ADD':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,137,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[39,39,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,203,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'COLUMN':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,28,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,201
,203,204,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[40,40,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,80,84,-162,-161,-93,95,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,80,95,84,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'CHECK':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,198,203,205,207,213,216,217,222,234,255,264,270,275,277,281,284,286,288,297,304,305,307,308,312,318,320,322,323,325,327,329,330,332,340,341,345,346,347,349,351,362,363,365,366,367,368,369,373,376,379,386,387,389,399,401,409,411,414,],[41,41,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,41,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,250,41,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,250,250,336,-67,-175,-209,-211,-212,-58,-25,-30,250,-188,-190,-192,-194,-195,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-187,-189,-191,-193,336,-197,-166,-225,-226,-27,-196,-31,-198,-176,-167,-56,]),'FOREIGN':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,152,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,198,203,205,207,213,216,217,222,234,255,264,270,275,277,281,286,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[42,42,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,42,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,182,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,182,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,252,42,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,252,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'PRIMARY':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,152,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,198,203,204,205,207,213,216,217,222,234,255,264,270,275,277,281,284,286,297,304,305,307,308,312,318,320,322,323,325,327,329,330,332,340,341,345,346,347,349,351,362,363,365,366,367,368,373,3
76,379,386,387,389,399,401,409,411,414,],[43,43,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,86,-162,-161,-93,96,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,183,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,183,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,251,96,86,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,328,251,-67,-175,-209,-211,-212,-58,-25,-30,328,-188,-190,-192,-194,-195,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-187,-189,-191,-193,-197,-166,-225,-226,-27,-196,-31,-198,-176,-167,-56,]),'CONSTRAINT':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,198,203,204,205,207,213,216,217,222,234,255,264,270,275,277,281,284,286,297,304,305,307,308,312,318,320,322,323,325,327,329,330,332,340,341,345,346,347,349,351,362,363,365,366,367,368,373,376,379,386,387,389,399,401,409,411,414,],[44,44,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,85,-162,-161,-93,97,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,249,97,85,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,331,249,-67,-175,-209,-211,-212,-58,-25,-30,331,-188,-190,-192,-194,-195,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-187,-189,-191,-193,-197,-166,-225,-226,-27,-196,-31,-198,-176,-167,-56,]),'ID':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,27,29,30,32,33,34,35,37,39,40,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,71,73,74,75,76,77,78,79,80,81,82,83,84,85,88,89,91,93,94,95,97,98,99,101,102,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,148,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,180,181,185,186,187,188,189,191,197,198,205,206,207,209,213,214,216,217,222,227,231,234,236,237,238,249,253,254,255,257,260,261,262,264,270,271,272,274,275,276,277,281,286,287,289,297,300,304,305,307,308,309,312,314,316,318,320,321,324,331,332,338,339,340,341,345,346,347,349,351,353,355,362,363,370,372,376,377,378,379,382,384,386,387,396,399,402,404,409,411,413,414,],[26,26,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,76,-65,87,76,-162,-161,-93,92,-64,98,102,76,-165,-2,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,76,129,131,134,135,-92,-163,136,137,-46,138,139,141,-47,142,143,-90,-91,147,149,-45,150,152,-59,76,-61,-60,76,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,210,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,223,224,76,-231,76,76,76,-24,239,241,-66,256,-43,76,-174,76,-51,-207,-53,273,76,-28,76,76,280,288,292,76,-44,76,76,210,210,-50,-55,310,311,76,-229,76,-230,-26,241,76,76,-67,76,-175,-209,-211,-212,347,-58,76,76,-25,-30,364,76,369,-177,76,76,-38,-39,-170,-210,-54,-57,-227,76,76,-228,-29,390,76,-166,76,395
,-225,76,76,-226,-27,76,-31,410,412,-176,-167,76,-56,]),'SELECT':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[45,45,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'DECIMAL':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,27,29,31,32,33,34,35,39,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,75,76,79,83,88,89,94,98,99,101,102,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,150,153,155,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,185,186,187,188,189,191,205,207,209,213,214,216,217,222,231,234,236,237,241,254,255,257,260,264,270,274,275,276,277,281,287,289,297,300,304,305,307,308,312,314,316,318,320,324,332,338,339,340,341,345,346,347,349,351,353,355,362,363,372,376,377,379,382,384,386,387,396,399,409,411,413,414,],[34,34,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,34,-65,89,34,-162,-161,-93,-64,34,-165,-2,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,34,-92,-163,-46,-47,-90,-91,-45,159,34,-61,-60,34,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,159,-52,159,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,34,-231,34,34,34,-24,-66,-43,34,-174,34,-51,-207,-53,34,-28,34,34,159,34,-44,34,34,-50,-55,34,-229,34,-230,-26,34,34,-67,34,-175,-209,-211,-212,-58,34,34,-25,-30,34,-177,34,34,-38,-39,-170,-210,-54,-57,-227,34,34,-228,-29,34,-166,34,-225,34,34,-226,-27,34,-31,-176,-167,34,-56,]),'ENTERO':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,27,29,31,32,33,34,35,39,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,75,76,79,83,88,89,94,98,99,101,102,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,185,186,187,188,189,191,205,207,209,213,214,216,217,218,220,221,222,231,234,236,237,254,255,257,260,264,266,270,274,275,276,277,281,287,289,297,300,304,305,307,308,312,314,316,318,320,324,332,338,339,340,341,345,346,347,349,351,353,355,362,363,372,376,377,379,382,384,386,387,396,399,409,411,413,414,],[33,33,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,33,-65,88,33,-162,-161,-93,-64,33,-165,-2,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,33,-92,-163,-46,-47,-90,-91,-45,-59,33,-61,-60,33,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-8
1,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,33,-231,33,33,33,-24,-66,-43,33,-174,33,-51,-207,265,267,268,-53,33,-28,33,33,33,-44,33,33,-50,306,-55,33,-229,33,-230,-26,33,33,-67,33,-175,-209,-211,-212,-58,33,33,-25,-30,33,-177,33,33,-38,-39,-170,-210,-54,-57,-227,33,33,-228,-29,33,-166,33,-225,33,33,-226,-27,33,-31,-176,-167,33,-56,]),'CADENA':([0,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,27,29,32,33,34,35,39,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,75,76,79,83,88,89,94,98,99,101,102,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,185,186,187,188,189,191,205,207,209,213,214,216,217,222,231,234,236,237,254,255,257,260,264,270,274,275,276,277,281,287,289,297,300,304,305,307,308,312,314,316,318,320,324,332,338,339,340,341,345,346,347,349,351,353,355,362,363,372,376,377,379,382,384,386,387,396,399,409,411,413,414,],[46,46,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,46,-65,46,-162,-161,-93,-64,46,-165,-2,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,-92,-163,-46,-47,-90,-91,-45,-59,46,-61,-60,46,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,46,-231,46,46,46,-24,-66,-43,46,-174,46,-51,-207,-53,46,-28,46,46,46,-44,46,46,-50,-55,46,-229,46,-230,-26,46,46,-67,46,-175,-209,-211,-212,-58,46,46,-25,-30,46,-177,46,46,-38,-39,-170,-210,-54,-57,-227,46,46,-228,-29,46,-166,46,-225,46,46,-226,-27,46,-31,-176,-167,46,-56,]),'$end':([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,26,29,33,34,35,39,46,47,75,76,79,83,88,89,94,98,101,102,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,135,142,143,144,145,146,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,186,191,205,207,213,216,217,222,234,255,264,270,275,277,281,297,304,305,307,308,312,318,320,332,340,341,345,346,347,349,351,362,363,376,379,386,387,399,409,411,414,],[0,-1,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,-17,-18,-19,-20,-21,-22,-23,-63,-65,-162,-161,-93,-64,-165,-2,-92,-163,-46,-47,-90,-91,-45,-59,-61,-60,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-37,-164,-59,-60,-61,-36,-89,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-231,-24,-66,-43,-174,-51,-207,-53,-28,-44,-50,-55,-229,-230,-26,-67,-175,-209,-211,-212,-58,-25,-30,-177,-38,-39,-170,-210,-54,-57,-227,-228,-29,-166,-225,-226,-27,-31,-176,-167,-56,]),'MAS':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[48,-163,-162,-161,-93,-165,48,-163,-90,-91,48,48,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,48,48,48,48,48,48,48,48,-164,-89,48,48,-93,48,48,48,48,48,48,48,48,48,48,]),'POR':([9,26,33,34,35,45,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[50,-163,-162,-161,-93,104,-165,50,-163,-90,-91,50,50,-93,50,50,
-70,-71,-72,50,-74,-75,-76,-77,-78,-79,-80,50,50,50,50,50,50,50,50,-164,-89,50,50,-93,50,50,50,50,50,50,50,50,50,50,]),'DIV':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[51,-163,-162,-161,-93,-165,51,-163,-90,-91,51,51,-93,51,51,-70,-71,-72,51,-74,-75,-76,-77,-78,-79,-80,51,51,51,51,51,51,51,51,-164,-89,51,51,-93,51,51,51,51,51,51,51,51,51,51,]),'RESIDUO':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[52,-163,-162,-161,-93,-165,52,-163,-90,-91,52,52,-93,52,52,-70,-71,-72,52,-74,-75,-76,-77,-78,-79,-80,52,52,52,52,52,52,52,52,-164,-89,52,52,-93,52,52,52,52,52,52,52,52,52,52,]),'POTENCIA':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[53,-163,-162,-161,-93,-165,53,-163,-90,-91,53,53,-93,53,53,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,53,53,53,53,53,53,53,53,-164,-89,53,53,-93,53,53,53,53,53,53,53,53,53,53,]),'AND':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[54,-163,-162,-161,-93,-165,54,-163,-90,-91,54,54,-93,54,54,54,54,54,54,-74,-75,-76,-77,-78,-79,-80,54,54,54,54,54,54,54,54,-164,-89,54,54,-93,54,54,54,54,54,54,54,54,54,54,]),'OR':([9,25,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[55,72,-163,-162,-161,-93,-165,55,-163,-90,-91,55,55,-93,55,55,55,55,55,55,-74,-75,-76,-77,-78,-79,-80,55,55,55,55,55,55,55,55,-164,-89,55,55,-93,55,55,55,55,55,55,55,55,55,55,]),'SIMBOLOOR2':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[56,-163,-162,-161,-93,-165,56,-163,-90,-91,56,56,-93,56,56,56,56,56,56,-74,-75,-76,-77,-78,-79,-80,56,56,56,56,56,56,56,56,-164,-89,56,56,-93,56,56,56,56,56,56,56,56,56,56,]),'SIMBOLOOR':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[57,-163,-162,-161,-93,-165,57,-163,-90,-91,57,57,-93,57,57,57,57,57,57,-74,-75,-76,-77,-78,-79,-80,57,57,57,57,57,57,57,57,-164,-89,57,57,-93,57,57,57,57,57,57,57,57,57,57,]),'SIMBOLOAND2':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[58,-163,-162,-161,-93,-165,58,-163,-90,-91,58,58,-93,58,58,58,58,58,58,-74,-75,-76,-77,-78,-79,-80,58,58,58,58,58,58,58,58,-164,-89,58,58,-93,58,58,58,58,58,58,58,58,58,58,]),'DESPLAZAMIENTOIZQUIERDA':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[59,-163,-162,-161,-93,-165,59,-163,-90,-91,59,59,-93,59,59,59,59,59,59,59,59,59,59,59,-79,-80,59,59,59,59,59,59,59,59,-164,-89,59,59,-93,59,59,59,59,59,59,59,59,59,59,]),'DESPLAZAMIENTODERECHA':([9,26,33,34,35,46,75,
76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[60,-163,-162,-161,-93,-165,60,-163,-90,-91,60,60,-93,60,60,60,60,60,60,60,60,60,60,60,-79,-80,60,60,60,60,60,60,60,60,-164,-89,60,60,-93,60,60,60,60,60,60,60,60,60,60,]),'IGUAL':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,194,195,210,229,231,233,263,301,315,337,358,360,381,391,405,],[61,-163,-162,-161,-93,-165,-92,-163,-90,-91,61,61,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,61,61,61,61,61,61,61,61,-164,-89,61,236,237,260,61,-93,61,61,61,61,61,-92,61,61,61,61,]),'IGUALIGUAL':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[62,-163,-162,-161,-93,-165,-92,-163,-90,-91,62,62,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,62,62,62,62,62,62,62,62,-164,-89,62,62,-93,62,62,62,62,62,-92,62,62,62,62,]),'NOTEQUAL':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[63,-163,-162,-161,-93,-165,-92,-163,-90,-91,63,63,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,63,63,63,63,63,63,63,63,-164,-89,63,63,-93,63,63,63,63,63,-92,63,63,63,63,]),'MAYORIGUAL':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[64,-163,-162,-161,-93,-165,-92,-163,-90,-91,64,64,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,64,64,64,64,64,64,64,64,-164,-89,64,64,-93,64,64,64,64,64,-92,64,64,64,64,]),'MENORIGUAL':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[65,-163,-162,-161,-93,-165,-92,-163,-90,-91,65,65,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,65,65,65,65,65,65,65,65,-164,-89,65,65,-93,65,65,65,65,65,-92,65,65,65,65,]),'MAYOR':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[66,-163,-162,-161,-93,-165,-92,-163,-90,-91,66,66,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,66,66,66,66,66,66,66,66,-164,-89,66,66,-93,66,66,66,66,66,-92,66,66,66,66,]),'MENOR':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[67,-163,-162,-161,-93,-165,-92,-163,-90,-91,67,67,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,67,67,67,67,67,67,67,67,-164,-89,67,67,-93,67,67,67,67,67,-92,67,67,67,67,]),'DIFERENTE':([9,26,33,34,35,46,75,76,88,89,90,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,179,229,231,233,263,301,315,337,358,360,381,391,405,],[68,-163,-162,-161,-93,-165,-92,-163,-90,-91,68,68,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,68,68,68,68,68,68,68,68,-164,-89,68,68,-93,68,68,68,68,68,-92,68,68,68,68,]),'COMA':([21,26,33,34,35,46,75,76,88,89,103,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,1
27,128,129,135,146,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,190,211,212,217,228,229,230,231,232,233,242,243,245,246,247,248,258,259,284,298,301,302,303,305,307,308,322,323,325,327,329,330,334,335,344,346,365,366,367,368,371,373,374,375,383,389,392,394,397,398,401,415,416,],[69,-63,-162,-161,-93,-165,-92,-163,-90,-91,187,-248,-247,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-62,-164,-89,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-250,262,-172,-207,187,-245,-246,-93,187,-249,286,-179,-183,-184,-185,-186,300,-169,-180,300,-173,262,-171,-209,-211,-212,-181,-188,-190,-192,-194,-195,-178,300,-168,-210,-187,-189,-191,-193,-182,-197,300,300,187,-196,-199,300,187,187,-198,300,-200,]),'DATABASES':([24,],[70,]),'DATABASE':([25,28,29,133,],[71,77,81,197,]),'TABLE':([25,28,29,],[73,78,82,]),'PUNTO':([26,76,],[74,74,]),'PARENTESISDERECHA':([33,34,35,46,75,76,88,89,90,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,179,217,223,224,242,243,245,246,247,248,258,259,265,267,268,273,284,298,305,306,307,308,310,311,322,323,325,327,329,330,334,335,337,344,346,357,358,359,360,365,366,367,368,371,373,374,375,385,389,390,391,392,394,401,412,415,416,],[-162,-161,-93,-165,-92,-163,-90,-91,146,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,222,-207,269,270,285,-179,-183,-184,-185,-186,299,-169,305,307,308,312,-180,342,-209,346,-211,-212,348,349,-181,-188,-190,-192,-194,-195,-178,371,373,-168,-210,-242,-92,385,146,-187,-189,-191,-193,-182,-197,392,393,-244,-196,400,401,-199,403,-198,414,416,-200,]),'AS':([33,34,35,46,75,76,88,89,105,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,229,231,],[-162,-161,-93,-165,-92,-163,-90,-91,189,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,189,-93,]),'FROM':([33,34,35,38,46,75,76,88,89,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,190,229,230,231,233,],[-162,-161,-93,93,-165,-92,-163,-90,-91,185,188,-248,-247,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,-250,-245,-246,-93,-249,]),'PUNTOYCOMA':([33,34,35,46,70,75,76,79,83,87,88,89,94,103,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,131,135,139,141,142,143,144,146,149,153,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,190,192,193,202,212,216,217,222,228,229,230,231,232,233,235,239,256,263,264,270,278,279,280,282,285,292,293,294,295,296,301,302,303,305,307,308,312,313,315,317,319,342,346,347,349,350,352,357,358,361,364,380,381,383,385,388,397,398,400,403,405,406,407,408,414,],[-162,-161,-93,-165,130,-92,-163,-46,-47,145,-90,-91,-45,186,-248,-247,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,191,-164,205,207,-59,-60,-61,-89,213,-52,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-250,234,-33,255,-172,-51,-207,-53,275,-245,-246,-93,277,-249,-32,281,297,304,-50,-55,-34,-35,318,320,332,340,341,-40,-41,-42,-173,345,-171,-209,-211,-212,-58,351,-243,362,363,376,-210,-54,-57,379,-2
33,-242,-92,386,387,-232,-234,-236,-244,399,-235,-237,409,411,-238,-239,-240,-241,-56,]),'WHERE':([33,34,35,46,75,76,88,89,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,149,190,211,212,228,229,230,231,232,233,301,303,],[-162,-161,-93,-165,-92,-163,-90,-91,-248,-247,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,214,-250,261,-172,274,-245,-246,-93,276,-249,-173,-171,]),'LIMIT':([33,34,35,46,75,76,88,89,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,190,229,230,231,233,313,315,317,350,352,357,358,361,380,381,383,385,397,398,405,406,407,408,],[-162,-161,-93,-165,-92,-163,-90,-91,-248,-247,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,-250,-245,-246,-93,-249,353,-243,353,353,-233,-242,-92,353,-232,-234,-236,-244,-235,-237,-238,-239,-240,-241,]),'GROUP':([33,34,35,46,75,76,88,89,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,190,229,230,231,233,313,315,317,350,352,357,358,361,380,381,383,385,397,398,405,406,407,408,],[-162,-161,-93,-165,-92,-163,-90,-91,-248,-247,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,-250,-245,-246,-93,-249,354,-243,354,354,-233,-242,-92,354,-232,-234,-236,-244,-235,-237,-238,-239,-240,-241,]),'HAVING':([33,34,35,46,75,76,88,89,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,190,229,230,231,233,313,315,317,350,352,357,358,361,380,381,383,385,397,398,405,406,407,408,],[-162,-161,-93,-165,-92,-163,-90,-91,-248,-247,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,-250,-245,-246,-93,-249,355,-243,355,355,-233,-242,-92,355,-232,-234,-236,-244,-235,-237,-238,-239,-240,-241,]),'ORDER':([33,34,35,46,75,76,88,89,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,190,229,230,231,233,313,315,317,350,352,357,358,361,380,381,383,385,397,398,405,406,407,408,],[-162,-161,-93,-165,-92,-163,-90,-91,-248,-247,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,-250,-245,-246,-93,-249,356,-243,356,356,-233,-242,-92,356,-232,-234,-236,-244,-235,-237,-238,-239,-240,-241,]),'ASC':([33,34,35,46,75,76,88,89,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,190,229,230,231,233,398,],[-162,-161,-93,-165,-92,-163,-90,-91,-248,-247,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,-250,-245,-246,-93,-249,407,]),'DESC':([33,34,35,46,75,76,88,89,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,190,229,230,231,233,398,],[-162,-161,-93,-165,-92,-163,-90,-91,-248,-247,-93,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,-250,-245,-246,-93,-249,408,]),'OFFSET':([33,34,35,46,75,76,88,89,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,135,146,381,],[-162,-161,-93,-165,-92,-163,-90,-91,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-164,-89,396,]),'OWNER':([33,34,46,76,131,135,136,192,193,235,239,278,279,280,282,319,364,388,],[-162,-161,-165,-163,194,-164,200,194,-33,-32,194,-34,-35,194,194,194,194,194,]),'MODE':([33,34,46
,76,131,135,192,193,235,239,278,279,280,282,319,364,388,],[-162,-161,-165,-163,195,-164,195,-33,-32,195,-34,-35,195,195,195,195,195,]),'DEFAULT':([33,34,46,76,135,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,217,284,305,307,308,322,323,325,327,329,330,346,365,366,367,368,373,389,401,],[-162,-161,-165,-163,-164,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,-207,324,-209,-211,-212,324,-188,-190,-192,-194,-195,-210,-187,-189,-191,-193,-197,-196,-198,]),'NULL':([33,34,46,76,135,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,215,217,284,305,307,308,322,323,325,326,327,329,330,346,365,366,367,368,373,389,401,],[-162,-161,-165,-163,-164,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,264,-207,325,-209,-211,-212,325,-188,-190,367,-192,-194,-195,-210,-187,-189,-191,-193,-197,-196,-198,]),'UNIQUE':([33,34,46,76,102,135,152,156,157,158,159,160,161,163,167,168,169,170,171,172,173,174,175,176,177,178,198,217,284,286,305,307,308,322,323,325,327,329,330,346,365,366,367,368,369,373,389,401,],[-162,-161,-165,-163,184,-164,184,-201,-202,-203,-204,-205,-206,-208,-213,-214,-215,-216,-217,-218,-219,-220,-221,-222,-223,-224,244,-207,327,244,-209,-211,-212,327,-188,-190,-192,-194,-195,-210,-187,-189,-191,-193,389,-197,-196,-198,]),'INTO':([36,],[91,]),'KEY':([42,43,86,96,182,183,251,252,328,],[100,101,144,151,225,226,290,291,368,]),'IF':([71,81,197,],[132,140,240,]),'REPLACE':([72,],[133,]),'SET':([92,98,138,],[148,154,154,]),'TYPE':([98,138,],[155,155,]),'SMALLINT':([98,150,155,241,],[156,156,156,156,]),'INTEGER':([98,150,155,241,],[157,157,157,157,]),'BIGINT':([98,150,155,241,],[158,158,158,158,]),'NUMERIC':([98,150,155,241,],[160,160,160,160,]),'REAL':([98,150,155,241,],[161,161,161,161,]),'DOUBLE':([98,150,155,241,],[162,162,162,162,]),'MONEY':([98,150,155,241,],[163,163,163,163,]),'VARCHAR':([98,150,155,241,],[164,164,164,164,]),'CHARACTER':([98,150,155,241,],[165,165,165,165,]),'CHAR':([98,150,155,241,],[166,166,166,166,]),'TEXT':([98,150,155,241,],[167,167,167,167,]),'BOOLEAN':([98,150,155,241,],[168,168,168,168,]),'TIMESTAMP':([98,150,155,241,],[169,169,169,169,]),'TIME':([98,150,155,241,],[170,170,170,170,]),'INTERVAL':([98,150,155,241,],[171,171,171,171,]),'DATE':([98,150,155,241,],[172,172,172,172,]),'YEAR':([98,150,155,241,],[173,173,173,173,]),'MONTH':([98,150,155,241,],[174,174,174,174,]),'DAY':([98,150,155,241,],[175,175,175,175,]),'HOUR':([98,150,155,241,],[176,176,176,176,]),'MINUTE':([98,150,155,241,],[177,177,177,177,]),'SECOND':([98,150,155,241,],[178,178,178,178,]),'RENAME':([136,],[199,]),'EXISTS':([140,196,283,],[206,238,321,]),'VALUES':([147,299,],[208,343,]),'PRECISION':([162,],[217,]),'VARYING':([165,],[219,]),'TO':([199,200,],[253,254,]),'CURRENT_USER':([254,],[294,]),'SESSION_USER':([254,],[295,]),'REFERENCES':([269,348,393,],[309,378,402,]),'INHERITS':([285,],[333,]),'BY':([354,356,],[382,384,]),}
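# The loop below expands the compressed action items above into the table the
# LALR driver actually consults: _lr_action[state][token] -> action, where (in
# PLY's encoding) a positive value means "shift to that state", a negative
# value means "reduce by production number abs(value)", and 0 means "accept".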
_lr_action = {}
for _k, _v in _lr_action_items.items():
   for _x, _y in zip(_v[0], _v[1]):
      if _x not in _lr_action: _lr_action[_x] = {}
      _lr_action[_x][_k] = _y
del _lr_action_items
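# Example lookups drawn from the table above (a sanity check, not exhaustive):
#   _lr_action[0]['SHOW'] == 24   # in state 0, SHOW is shifted, moving to state 24
#   _lr_action[3]['SHOW'] == -3   # in state 3, SHOW triggers a reduce by production 3 (queries -> query)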
_lr_goto_items = {'inicio':([0,],[1,]),'queries':([0,],[2,]),'query':([0,2,],[3,47,]),'mostrarBD':([0,2,],[4,4,]),'crearBD':([0,2,],[5,5,]),'alterBD':([0,2,],[6,6,]),'dropBD':([0,2,],[7,7,]),'useBD':([0,2,],[8,8,]),'operacion':([0,2,27,32,45,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,99,185,187,188,189,214,260,274,276,289,314,316,353,355,372,382,384,396,],[9,9,75,90,105,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,179,105,229,105,233,263,301,315,315,337,358,360,381,105,391,105,105,405,]),'insertinBD':([0,2,],[10,10,]),'updateinBD':([0,2,],[11,11,]),'deleteinBD':([0,2,],[12,12,]),'createTable':([0,2,],[13,13,]),'inheritsBD':([0,2,],[14,14,]),'dropTable':([0,2,],[15,15,]),'alterTable':([0,2,],[16,16,]),'variantesAt':([0,2,137,],[17,17,202,]),'contAdd':([0,2,39,203,],[18,18,94,94,]),'contDrop':([0,2,29,204,],[19,19,83,83,]),'contAlter':([0,2,28,201,],[20,20,79,79,]),'listaid':([0,2,],[21,21,]),'tipoAlter':([0,2,],[22,22,]),'selectData':([0,2,],[23,23,]),'final':([0,2,27,32,45,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,99,107,185,187,188,189,209,214,231,236,237,254,257,260,274,276,287,289,300,314,316,324,338,339,353,355,372,377,382,384,396,413,],[35,35,35,35,107,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,190,107,231,107,35,259,35,190,278,279,296,259,35,35,35,259,35,344,35,35,366,259,259,35,107,35,259,107,107,35,259,]),'select_list':([45,185,188,355,382,384,],[103,228,232,383,397,398,]),'asignacion':([45,185,187,188,355,382,384,],[106,106,230,106,106,106,106,]),'tipo':([98,150,155,241,],[153,153,216,284,]),'parametrosCrearBD':([131,239,280,364,],[192,282,319,388,]),'parametroCrearBD':([131,192,239,280,282,319,364,388,],[193,235,193,193,235,235,193,235,]),'asignaciones':([148,261,],[211,302,]),'asigna':([148,261,262,],[212,212,303,]),'creaColumnas':([198,],[242,]),'Columna':([198,286,],[243,334,]),'constraintcheck':([198,284,286,322,],[245,329,245,329,]),'checkinColumn':([198,284,286,322,],[246,330,246,330,]),'primaryKey':([198,286,],[247,247,]),'foreignKey':([198,286,],[248,248,]),'listaParam':([209,257,287,338,339,377,413,],[258,298,335,374,375,394,415,]),'parametroAlterUser':([254,],[293,]),'search_condition':([274,276,314,316,],[313,317,357,359,]),'paramOpcional':([284,],[322,]),'paramopc':([284,322,],[323,365,]),'opcionesSelect':([313,317,],[350,361,]),'opcionSelect':([313,317,350,361,],[352,352,380,380,]),'ordenamiento':([398,],[406,]),}
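# Same expansion for the goto table: _lr_goto[state][nonterminal] -> state to
# enter after reducing to that nonterminal (e.g. _lr_goto[0]['inicio'] == 1).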
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
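# Note (assumed, standard PLY parse-table layout): _lr_action[state][token] holds the parser
# action for a (state, lookahead) pair -- a positive value shifts to that state, a negative
# value reduces by production abs(n) from _lr_productions below, and 0 accepts the input.
# _lr_goto[state][nonterminal] gives the state to transition to after such a reduction.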
_lr_productions = [
("S' -> inicio","S'",1,None,None,None),
('inicio -> queries','inicio',1,'p_inicio_1','gramaticaAscendenteTree.py',416),
('queries -> queries query','queries',2,'p_queries_1','gramaticaAscendenteTree.py',427),
('queries -> query','queries',1,'p_queries_2','gramaticaAscendenteTree.py',440),
('query -> mostrarBD','query',1,'p_query','gramaticaAscendenteTree.py',451),
('query -> crearBD','query',1,'p_query','gramaticaAscendenteTree.py',452),
('query -> alterBD','query',1,'p_query','gramaticaAscendenteTree.py',453),
('query -> dropBD','query',1,'p_query','gramaticaAscendenteTree.py',454),
('query -> useBD','query',1,'p_query','gramaticaAscendenteTree.py',455),
('query -> operacion','query',1,'p_query','gramaticaAscendenteTree.py',456),
('query -> insertinBD','query',1,'p_query','gramaticaAscendenteTree.py',457),
('query -> updateinBD','query',1,'p_query','gramaticaAscendenteTree.py',458),
('query -> deleteinBD','query',1,'p_query','gramaticaAscendenteTree.py',459),
('query -> createTable','query',1,'p_query','gramaticaAscendenteTree.py',460),
('query -> inheritsBD','query',1,'p_query','gramaticaAscendenteTree.py',461),
('query -> dropTable','query',1,'p_query','gramaticaAscendenteTree.py',462),
('query -> alterTable','query',1,'p_query','gramaticaAscendenteTree.py',463),
('query -> variantesAt','query',1,'p_query','gramaticaAscendenteTree.py',464),
('query -> contAdd','query',1,'p_query','gramaticaAscendenteTree.py',465),
('query -> contDrop','query',1,'p_query','gramaticaAscendenteTree.py',466),
('query -> contAlter','query',1,'p_query','gramaticaAscendenteTree.py',467),
('query -> listaid','query',1,'p_query','gramaticaAscendenteTree.py',468),
('query -> tipoAlter','query',1,'p_query','gramaticaAscendenteTree.py',469),
('query -> selectData','query',1,'p_query','gramaticaAscendenteTree.py',470),
('crearBD -> CREATE DATABASE ID PUNTOYCOMA','crearBD',4,'p_crearBaseDatos_1','gramaticaAscendenteTree.py',488),
('crearBD -> CREATE DATABASE IF NOT EXISTS ID PUNTOYCOMA','crearBD',7,'p_crearBaseDatos_2','gramaticaAscendenteTree.py',511),
('crearBD -> CREATE OR REPLACE DATABASE ID PUNTOYCOMA','crearBD',6,'p_crear_replace_BaseDatos_1','gramaticaAscendenteTree.py',548),
('crearBD -> CREATE OR REPLACE DATABASE IF NOT EXISTS ID PUNTOYCOMA','crearBD',9,'p_crear_replace_BaseDatos_2','gramaticaAscendenteTree.py',581),
('crearBD -> CREATE DATABASE ID parametrosCrearBD PUNTOYCOMA','crearBD',5,'p_crear_param_BaseDatos_1','gramaticaAscendenteTree.py',629),
('crearBD -> CREATE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMA','crearBD',8,'p_crear_param_BaseDatos_2','gramaticaAscendenteTree.py',654),
('crearBD -> CREATE OR REPLACE DATABASE ID parametrosCrearBD PUNTOYCOMA','crearBD',7,'p_crear_replace_param_BaseDatos_1','gramaticaAscendenteTree.py',694),
('crearBD -> CREATE OR REPLACE DATABASE IF NOT EXISTS ID parametrosCrearBD PUNTOYCOMA','crearBD',10,'p_crear_replace_param_BaseDatos_2','gramaticaAscendenteTree.py',729),
('parametrosCrearBD -> parametrosCrearBD parametroCrearBD','parametrosCrearBD',2,'p_parametrosCrearBD_1','gramaticaAscendenteTree.py',779),
('parametrosCrearBD -> parametroCrearBD','parametrosCrearBD',1,'p_parametrosCrearBD_2','gramaticaAscendenteTree.py',791),
('parametroCrearBD -> OWNER IGUAL final','parametroCrearBD',3,'p_parametroCrearBD','gramaticaAscendenteTree.py',801),
('parametroCrearBD -> MODE IGUAL final','parametroCrearBD',3,'p_parametroCrearBD','gramaticaAscendenteTree.py',802),
('useBD -> USE ID PUNTOYCOMA','useBD',3,'p_usarBaseDatos','gramaticaAscendenteTree.py',832),
('mostrarBD -> SHOW DATABASES PUNTOYCOMA','mostrarBD',3,'p_mostrarBD','gramaticaAscendenteTree.py',849),
('alterBD -> ALTER DATABASE ID RENAME TO ID PUNTOYCOMA','alterBD',7,'p_alterBD_1','gramaticaAscendenteTree.py',866),
('alterBD -> ALTER DATABASE ID OWNER TO parametroAlterUser PUNTOYCOMA','alterBD',7,'p_alterBD_2','gramaticaAscendenteTree.py',903),
('parametroAlterUser -> CURRENT_USER','parametroAlterUser',1,'p_parametroAlterUser_1','gramaticaAscendenteTree.py',938),
('parametroAlterUser -> SESSION_USER','parametroAlterUser',1,'p_parametroAlterUser_2','gramaticaAscendenteTree.py',952),
('parametroAlterUser -> final','parametroAlterUser',1,'p_parametroAlterUser_3','gramaticaAscendenteTree.py',966),
('dropTable -> DROP TABLE ID PUNTOYCOMA','dropTable',4,'p_dropTable','gramaticaAscendenteTree.py',976),
('alterTable -> ALTER TABLE ID variantesAt PUNTOYCOMA','alterTable',5,'p_alterTable','gramaticaAscendenteTree.py',988),
('variantesAt -> ADD contAdd','variantesAt',2,'p_variantesAt','gramaticaAscendenteTree.py',1006),
('variantesAt -> ALTER contAlter','variantesAt',2,'p_variantesAt','gramaticaAscendenteTree.py',1007),
('variantesAt -> DROP contDrop','variantesAt',2,'p_variantesAt','gramaticaAscendenteTree.py',1008),
('listaContAlter -> listaContAlter COMA contAlter','listaContAlter',3,'p_listaContAlter','gramaticaAscendenteTree.py',1020),
('listaContAlter -> contAlter','listaContAlter',1,'p_listaContAlter_2','gramaticaAscendenteTree.py',1026),
('contAlter -> COLUMN ID SET NOT NULL','contAlter',5,'p_contAlter','gramaticaAscendenteTree.py',1033),
('contAlter -> COLUMN ID TYPE tipo','contAlter',4,'p_contAlter','gramaticaAscendenteTree.py',1034),
('contAdd -> COLUMN ID tipo','contAdd',3,'p_contAdd','gramaticaAscendenteTree.py',1074),
('contAdd -> CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA','contAdd',4,'p_contAdd','gramaticaAscendenteTree.py',1075),
('contAdd -> FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID','contAdd',7,'p_contAdd','gramaticaAscendenteTree.py',1076),
('contAdd -> PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA','contAdd',5,'p_contAdd','gramaticaAscendenteTree.py',1077),
('contAdd -> CONSTRAINT ID FOREIGN KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA ID PARENTESISDERECHA','contAdd',12,'p_contAdd','gramaticaAscendenteTree.py',1078),
('contAdd -> CONSTRAINT ID PRIMARY KEY PARENTESISIZQUIERDA ID PARENTESISDERECHA','contAdd',7,'p_contAdd','gramaticaAscendenteTree.py',1079),
('contAdd -> CONSTRAINT ID UNIQUE PARENTESISIZQUIERDA ID PARENTESISDERECHA','contAdd',6,'p_contAdd','gramaticaAscendenteTree.py',1080),
('contDrop -> COLUMN ID','contDrop',2,'p_contDrop','gramaticaAscendenteTree.py',1249),
('contDrop -> CONSTRAINT ID','contDrop',2,'p_contDrop','gramaticaAscendenteTree.py',1250),
('contDrop -> PRIMARY KEY','contDrop',2,'p_contDrop','gramaticaAscendenteTree.py',1251),
('listaid -> listaid COMA ID','listaid',3,'p_listaID','gramaticaAscendenteTree.py',1292),
('listaid -> ID','listaid',1,'p_listaID_2','gramaticaAscendenteTree.py',1301),
('tipoAlter -> ADD','tipoAlter',1,'p_tipoAlter','gramaticaAscendenteTree.py',1310),
('tipoAlter -> DROP','tipoAlter',1,'p_tipoAlter','gramaticaAscendenteTree.py',1311),
('dropBD -> DROP DATABASE ID PUNTOYCOMA','dropBD',4,'p_dropBD_1','gramaticaAscendenteTree.py',1316),
('dropBD -> DROP DATABASE IF EXISTS ID PUNTOYCOMA','dropBD',6,'p_dropBD_2','gramaticaAscendenteTree.py',1339),
('operacion -> operacion MAS operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1372),
('operacion -> operacion MENOS operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1373),
('operacion -> operacion POR operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1374),
('operacion -> operacion DIV operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1375),
('operacion -> operacion RESIDUO operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1376),
('operacion -> operacion POTENCIA operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1377),
('operacion -> operacion AND operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1378),
('operacion -> operacion OR operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1379),
('operacion -> operacion SIMBOLOOR2 operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1380),
('operacion -> operacion SIMBOLOOR operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1381),
('operacion -> operacion SIMBOLOAND2 operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1382),
('operacion -> operacion DESPLAZAMIENTOIZQUIERDA operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1383),
('operacion -> operacion DESPLAZAMIENTODERECHA operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1384),
('operacion -> operacion IGUAL operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1385),
('operacion -> operacion IGUALIGUAL operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1386),
('operacion -> operacion NOTEQUAL operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1387),
('operacion -> operacion MAYORIGUAL operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1388),
('operacion -> operacion MENORIGUAL operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1389),
('operacion -> operacion MAYOR operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1390),
('operacion -> operacion MENOR operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1391),
('operacion -> operacion DIFERENTE operacion','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1392),
('operacion -> PARENTESISIZQUIERDA operacion PARENTESISDERECHA','operacion',3,'p_operacion','gramaticaAscendenteTree.py',1393),
('operacion -> MENOS ENTERO','operacion',2,'p_operacion_menos_unario_entero','gramaticaAscendenteTree.py',1750),
('operacion -> MENOS DECIMAL','operacion',2,'p_operacion_menos_unario_decimal','gramaticaAscendenteTree.py',1766),
('operacion -> NOT operacion','operacion',2,'p_operacion_not_unario','gramaticaAscendenteTree.py',1785),
('operacion -> final','operacion',1,'p_operacion_final','gramaticaAscendenteTree.py',1803),
('funcionBasica -> ABS PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1813),
('funcionBasica -> CBRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1814),
('funcionBasica -> CEIL PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1815),
('funcionBasica -> CEILING PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1816),
('funcionBasica -> DEGREES PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1817),
('funcionBasica -> DIV PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica','gramaticaAscendenteTree.py',1818),
('funcionBasica -> EXP PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1819),
('funcionBasica -> FACTORIAL PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1820),
('funcionBasica -> FLOOR PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1821),
('funcionBasica -> GCD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica','gramaticaAscendenteTree.py',1822),
('funcionBasica -> LCM PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica','gramaticaAscendenteTree.py',1823),
('funcionBasica -> LN PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1824),
('funcionBasica -> LOG PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1825),
('funcionBasica -> MOD PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica','gramaticaAscendenteTree.py',1826),
('funcionBasica -> PI PARENTESISIZQUIERDA PARENTESISDERECHA','funcionBasica',3,'p_funcion_basica','gramaticaAscendenteTree.py',1827),
('funcionBasica -> POWER PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica','gramaticaAscendenteTree.py',1828),
('funcionBasica -> RADIANS PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1829),
('funcionBasica -> ROUND PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1830),
('funcionBasica -> SIGN PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1831),
('funcionBasica -> SQRT PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1832),
('funcionBasica -> TRIM_SCALE PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1833),
('funcionBasica -> TRUNC PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1834),
('funcionBasica -> WIDTH_BUCKET PARENTESISIZQUIERDA operacion COMA operacion COMA operacion COMA operacion PARENTESISDERECHA','funcionBasica',10,'p_funcion_basica','gramaticaAscendenteTree.py',1835),
('funcionBasica -> RANDOM PARENTESISIZQUIERDA PARENTESISDERECHA','funcionBasica',3,'p_funcion_basica','gramaticaAscendenteTree.py',1836),
('funcionBasica -> GREATEST PARENTESISIZQUIERDA select_list PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1837),
('funcionBasica -> LEAST PARENTESISIZQUIERDA select_list PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1838),
('funcionBasica -> NOW PARENTESISIZQUIERDA PARENTESISDERECHA','funcionBasica',3,'p_funcion_basica','gramaticaAscendenteTree.py',1839),
('funcionBasica -> ACOS PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1842),
('funcionBasica -> ACOSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1843),
('funcionBasica -> ASIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1844),
('funcionBasica -> ASIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1845),
('funcionBasica -> ATAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1846),
('funcionBasica -> ATAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1847),
('funcionBasica -> ATAN2 PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica','gramaticaAscendenteTree.py',1848),
('funcionBasica -> ATAN2D PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica','gramaticaAscendenteTree.py',1849),
('funcionBasica -> COS PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1852),
('funcionBasica -> COSD PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1853),
('funcionBasica -> COT PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1854),
('funcionBasica -> COTD PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1855),
('funcionBasica -> SIN PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1856),
('funcionBasica -> SIND PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1857),
('funcionBasica -> TAN PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1858),
('funcionBasica -> TAND PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1859),
('funcionBasica -> SINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1860),
('funcionBasica -> COSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1864),
('funcionBasica -> TANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1865),
('funcionBasica -> ASINH PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1866),
('funcionBasica -> ACOSH PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1867),
('funcionBasica -> ATANH PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1868),
('funcionBasica -> LENGTH PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1869),
('funcionBasica -> TRIM PARENTESISIZQUIERDA opcionTrim operacion FROM operacion PARENTESISDERECHA','funcionBasica',7,'p_funcion_basica','gramaticaAscendenteTree.py',1870),
('funcionBasica -> GET_BYTE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica','gramaticaAscendenteTree.py',1871),
('funcionBasica -> MD5 PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1872),
('funcionBasica -> SET_BYTE PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA','funcionBasica',8,'p_funcion_basica','gramaticaAscendenteTree.py',1873),
('funcionBasica -> SHA256 PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1874),
('funcionBasica -> SUBSTR PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA','funcionBasica',8,'p_funcion_basica','gramaticaAscendenteTree.py',1875),
('funcionBasica -> CONVERT PARENTESISIZQUIERDA operacion COMA operacion COMA operacion PARENTESISDERECHA','funcionBasica',8,'p_funcion_basica','gramaticaAscendenteTree.py',1876),
('funcionBasica -> ENCODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica','gramaticaAscendenteTree.py',1877),
('funcionBasica -> DECODE PARENTESISIZQUIERDA operacion COMA operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica','gramaticaAscendenteTree.py',1878),
('funcionBasica -> AVG PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1879),
('funcionBasica -> SUM PARENTESISIZQUIERDA operacion PARENTESISDERECHA','funcionBasica',4,'p_funcion_basica','gramaticaAscendenteTree.py',1880),
('funcionBasica -> SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion FOR operacion PARENTESISDERECHA','funcionBasica',8,'p_funcion_basica_1','gramaticaAscendenteTree.py',2887),
('funcionBasica -> SUBSTRING PARENTESISIZQUIERDA operacion FROM operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica_2','gramaticaAscendenteTree.py',2891),
('funcionBasica -> SUBSTRING PARENTESISIZQUIERDA operacion FOR operacion PARENTESISDERECHA','funcionBasica',6,'p_funcion_basica_3','gramaticaAscendenteTree.py',2895),
('opcionTrim -> LEADING','opcionTrim',1,'p_opcionTrim','gramaticaAscendenteTree.py',2900),
('opcionTrim -> TRAILING','opcionTrim',1,'p_opcionTrim','gramaticaAscendenteTree.py',2901),
('opcionTrim -> BOTH','opcionTrim',1,'p_opcionTrim','gramaticaAscendenteTree.py',2902),
('final -> DECIMAL','final',1,'p_final_decimal','gramaticaAscendenteTree.py',2909),
('final -> ENTERO','final',1,'p_final_entero','gramaticaAscendenteTree.py',2921),
('final -> ID','final',1,'p_final_id','gramaticaAscendenteTree.py',2933),
('final -> ID PUNTO ID','final',3,'p_final_invocacion','gramaticaAscendenteTree.py',2945),
('final -> CADENA','final',1,'p_final_cadena','gramaticaAscendenteTree.py',2961),
('insertinBD -> INSERT INTO ID VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA','insertinBD',8,'p_insertBD_1','gramaticaAscendenteTree.py',2974),
('insertinBD -> INSERT INTO ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA VALUES PARENTESISIZQUIERDA listaParam PARENTESISDERECHA PUNTOYCOMA','insertinBD',11,'p_insertBD_2','gramaticaAscendenteTree.py',3004),
('listaParam -> listaParam COMA final','listaParam',3,'p_listaParam','gramaticaAscendenteTree.py',3009),
('listaParam -> final','listaParam',1,'p_listaParam_2','gramaticaAscendenteTree.py',3023),
('updateinBD -> UPDATE ID SET asignaciones WHERE asignaciones PUNTOYCOMA','updateinBD',7,'p_updateBD','gramaticaAscendenteTree.py',3034),
('asignaciones -> asignaciones COMA asigna','asignaciones',3,'p_asignaciones','gramaticaAscendenteTree.py',3068),
('asignaciones -> asigna','asignaciones',1,'p_asignaciones_2','gramaticaAscendenteTree.py',3082),
('asigna -> ID IGUAL operacion','asigna',3,'p_asigna','gramaticaAscendenteTree.py',3093),
('deleteinBD -> DELETE FROM ID PUNTOYCOMA','deleteinBD',4,'p_deleteinBD_1','gramaticaAscendenteTree.py',3109),
('deleteinBD -> DELETE FROM ID WHERE operacion PUNTOYCOMA','deleteinBD',6,'p_deleteinBD_2','gramaticaAscendenteTree.py',3113),
('inheritsBD -> CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA INHERITS PARENTESISIZQUIERDA ID PARENTESISDERECHA PUNTOYCOMA','inheritsBD',11,'p_inheritsBD','gramaticaAscendenteTree.py',3145),
('createTable -> CREATE TABLE ID PARENTESISIZQUIERDA creaColumnas PARENTESISDERECHA PUNTOYCOMA','createTable',7,'p_createTable','gramaticaAscendenteTree.py',3179),
('creaColumnas -> creaColumnas COMA Columna','creaColumnas',3,'p_creaColumna','gramaticaAscendenteTree.py',3206),
('creaColumnas -> Columna','creaColumnas',1,'p_creaColumna_2','gramaticaAscendenteTree.py',3219),
('Columna -> ID tipo','Columna',2,'p_columna_1','gramaticaAscendenteTree.py',3231),
('Columna -> ID tipo paramOpcional','Columna',3,'p_columna_2','gramaticaAscendenteTree.py',3246),
('Columna -> UNIQUE PARENTESISIZQUIERDA listaParam PARENTESISDERECHA','Columna',4,'p_columna_3','gramaticaAscendenteTree.py',3264),
('Columna -> constraintcheck','Columna',1,'p_columna_4','gramaticaAscendenteTree.py',3279),
('Columna -> checkinColumn','Columna',1,'p_columna_5','gramaticaAscendenteTree.py',3289),
('Columna -> primaryKey','Columna',1,'p_columna_6','gramaticaAscendenteTree.py',3299),
('Columna -> foreignKey','Columna',1,'p_columna_7','gramaticaAscendenteTree.py',3309),
('paramOpcional -> paramOpcional paramopc','paramOpcional',2,'p_paramOpcional','gramaticaAscendenteTree.py',3322),
('paramOpcional -> paramopc','paramOpcional',1,'p_paramOpcional_1','gramaticaAscendenteTree.py',3335),
('paramopc -> DEFAULT final','paramopc',2,'p_paramopc_1','gramaticaAscendenteTree.py',3347),
('paramopc -> NULL','paramopc',1,'p_paramopc_1','gramaticaAscendenteTree.py',3348),
('paramopc -> NOT NULL','paramopc',2,'p_paramopc_1','gramaticaAscendenteTree.py',3349),
('paramopc -> UNIQUE','paramopc',1,'p_paramopc_1','gramaticaAscendenteTree.py',3350),
('paramopc -> PRIMARY KEY','paramopc',2,'p_paramopc_1','gramaticaAscendenteTree.py',3351),
('paramopc -> constraintcheck','paramopc',1,'p_paramopc_2','gramaticaAscendenteTree.py',3428),
('paramopc -> checkinColumn','paramopc',1,'p_paramopc_3','gramaticaAscendenteTree.py',3438),
('paramopc -> CONSTRAINT ID UNIQUE','paramopc',3,'p_paramopc_4','gramaticaAscendenteTree.py',3451),
('checkinColumn -> CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA','checkinColumn',4,'p_checkcolumna','gramaticaAscendenteTree.py',3476),
('constraintcheck -> CONSTRAINT ID CHECK PARENTESISIZQUIERDA operacion PARENTESISDERECHA','constraintcheck',6,'p_constraintcheck','gramaticaAscendenteTree.py',3492),
('primaryKey -> PRIMARY KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA','primaryKey',5,'p_primaryKey','gramaticaAscendenteTree.py',3518),
('foreignKey -> FOREIGN KEY PARENTESISIZQUIERDA listaParam PARENTESISDERECHA REFERENCES ID PARENTESISIZQUIERDA listaParam PARENTESISDERECHA','foreignKey',10,'p_foreingkey','gramaticaAscendenteTree.py',3538),
('tipo -> SMALLINT','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3573),
('tipo -> INTEGER','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3574),
('tipo -> BIGINT','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3575),
('tipo -> DECIMAL','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3576),
('tipo -> NUMERIC','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3577),
('tipo -> REAL','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3578),
('tipo -> DOUBLE PRECISION','tipo',2,'p_tipo','gramaticaAscendenteTree.py',3579),
('tipo -> MONEY','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3580),
('tipo -> VARCHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA','tipo',4,'p_tipo','gramaticaAscendenteTree.py',3581),
('tipo -> CHARACTER VARYING PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA','tipo',5,'p_tipo','gramaticaAscendenteTree.py',3582),
('tipo -> CHARACTER PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA','tipo',4,'p_tipo','gramaticaAscendenteTree.py',3583),
('tipo -> CHAR PARENTESISIZQUIERDA ENTERO PARENTESISDERECHA','tipo',4,'p_tipo','gramaticaAscendenteTree.py',3584),
('tipo -> TEXT','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3585),
('tipo -> BOOLEAN','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3586),
('tipo -> TIMESTAMP','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3587),
('tipo -> TIME','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3588),
('tipo -> INTERVAL','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3589),
('tipo -> DATE','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3590),
('tipo -> YEAR','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3591),
('tipo -> MONTH','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3592),
('tipo -> DAY','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3593),
('tipo -> HOUR','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3594),
('tipo -> MINUTE','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3595),
('tipo -> SECOND','tipo',1,'p_tipo','gramaticaAscendenteTree.py',3596),
('selectData -> SELECT select_list FROM select_list WHERE search_condition opcionesSelect PUNTOYCOMA','selectData',8,'p_select','gramaticaAscendenteTree.py',3907),
('selectData -> SELECT POR FROM select_list WHERE search_condition opcionesSelect PUNTOYCOMA','selectData',8,'p_select','gramaticaAscendenteTree.py',3908),
('selectData -> SELECT select_list FROM select_list WHERE search_condition PUNTOYCOMA','selectData',7,'p_select_1','gramaticaAscendenteTree.py',3985),
('selectData -> SELECT POR FROM select_list WHERE search_condition PUNTOYCOMA','selectData',7,'p_select_1','gramaticaAscendenteTree.py',3986),
('selectData -> SELECT select_list FROM select_list PUNTOYCOMA','selectData',5,'p_select_2','gramaticaAscendenteTree.py',4050),
('selectData -> SELECT POR FROM select_list PUNTOYCOMA','selectData',5,'p_select_2','gramaticaAscendenteTree.py',4051),
('selectData -> SELECT select_list PUNTOYCOMA','selectData',3,'p_select_3','gramaticaAscendenteTree.py',4102),
('opcionesSelect -> opcionesSelect opcionSelect','opcionesSelect',2,'p_opcionesSelect_1','gramaticaAscendenteTree.py',4120),
('opcionesSelect -> opcionSelect','opcionesSelect',1,'p_opcionesSelect_2','gramaticaAscendenteTree.py',4134),
('opcionSelect -> LIMIT operacion','opcionSelect',2,'p_opcionesSelect_3','gramaticaAscendenteTree.py',4147),
('opcionSelect -> GROUP BY select_list','opcionSelect',3,'p_opcionesSelect_3','gramaticaAscendenteTree.py',4148),
('opcionSelect -> HAVING select_list','opcionSelect',2,'p_opcionesSelect_3','gramaticaAscendenteTree.py',4149),
('opcionSelect -> ORDER BY select_list','opcionSelect',3,'p_opcionesSelect_3','gramaticaAscendenteTree.py',4150),
('opcionSelect -> LIMIT operacion OFFSET operacion','opcionSelect',4,'p_opcionesSelect_4','gramaticaAscendenteTree.py',4207),
('opcionSelect -> ORDER BY select_list ordenamiento','opcionSelect',4,'p_opcionesSelect_4','gramaticaAscendenteTree.py',4208),
('ordenamiento -> ASC','ordenamiento',1,'p_ordenamiento','gramaticaAscendenteTree.py',4249),
('ordenamiento -> DESC','ordenamiento',1,'p_ordenamiento','gramaticaAscendenteTree.py',4250),
('search_condition -> NOT search_condition','search_condition',2,'p_search_condition_2','gramaticaAscendenteTree.py',4269),
('search_condition -> operacion','search_condition',1,'p_search_condition_3','gramaticaAscendenteTree.py',4286),
('search_condition -> PARENTESISIZQUIERDA search_condition PARENTESISDERECHA','search_condition',3,'p_search_condition_4','gramaticaAscendenteTree.py',4298),
('select_list -> select_list COMA operacion','select_list',3,'p_select_list_1','gramaticaAscendenteTree.py',4325),
('select_list -> select_list COMA asignacion','select_list',3,'p_select_list_6','gramaticaAscendenteTree.py',4339),
('select_list -> asignacion','select_list',1,'p_select_list_7','gramaticaAscendenteTree.py',4352),
('select_list -> operacion','select_list',1,'p_select_list_2','gramaticaAscendenteTree.py',4363),
('asignacion -> operacion AS operacion','asignacion',3,'p_asignacion_1','gramaticaAscendenteTree.py',4373),
('asignacion -> final final','asignacion',2,'p_asignacion_2','gramaticaAscendenteTree.py',4391),
('funcionBasica -> operacion BETWEEN operacion','funcionBasica',3,'p_funcion_basica_4','gramaticaAscendenteTree.py',4404),
('funcionBasica -> operacion LIKE CADENA','funcionBasica',3,'p_funcion_basica_5','gramaticaAscendenteTree.py',4422),
('funcionBasica -> operacion IN PARENTESISIZQUIERDA select_list PARENTESISDERECHA','funcionBasica',5,'p_funcion_basica_6','gramaticaAscendenteTree.py',4442),
('funcionBasica -> operacion NOT BETWEEN operacion','funcionBasica',4,'p_funcion_basica_7','gramaticaAscendenteTree.py',4461),
('funcionBasica -> operacion BETWEEN SYMMETRIC operacion','funcionBasica',4,'p_funcion_basica_8','gramaticaAscendenteTree.py',4485),
('funcionBasica -> operacion NOT BETWEEN SYMMETRIC operacion','funcionBasica',5,'p_funcion_basica_9','gramaticaAscendenteTree.py',4508),
('funcionBasica -> operacion IS DISTINCT FROM operacion','funcionBasica',5,'p_funcion_basica_10','gramaticaAscendenteTree.py',4536),
('funcionBasica -> operacion IS NOT DISTINCT FROM operacion','funcionBasica',6,'p_funcion_basica_11','gramaticaAscendenteTree.py',4564),
]
|
py | b409e3dca188dde6e58cf1e884289180cb9a5f28 | """
Helper functions for building interactive plots that support persistent user annotations.
"""
from functools import partial
import param
import numpy as np
import pandas as pd
import panel as pn
import cartopy.crs as ccrs
import geopandas as gpd
import holoviews as hv
import geoviews as gv
import holoviews.plotting.bokeh
from holoviews import DynamicMap, Path, Table, NdOverlay, Store, Options
from holoviews.core.util import disable_constant
from holoviews.plotting.links import DataLink
from holoviews.streams import Selection1D, Stream, PolyDraw, PolyEdit, PointDraw, CDSStream
from geoviews.data.geopandas import GeoPandasInterface
from geoviews import Polygons, Points, WMTS, TriMesh, Path as GeoPath
from geoviews.util import path_to_geom_dicts
from shapely.geometry import Polygon, LinearRing, MultiPolygon
from .models.custom_tools import CheckpointTool, RestoreTool, ClearTool
from .links import VertexTableLink, PointTableLink
def paths_to_polys(path):
"""
Converts a Path object to a Polygons object by extracting all paths,
interpreting rings contained inside another ring as holes, and then
constructing Polygon and MultiPolygon geometries for each path.
"""
geoms = path_to_geom_dicts(path)
polys = []
for geom in geoms:
g = geom['geometry']
found = False
for p in list(polys):
if Polygon(p['geometry']).contains(g):
if 'holes' not in p:
p['holes'] = []
p['holes'].append(g)
found = True
elif Polygon(g).contains(p['geometry']):
polys.pop(polys.index(p))
if 'holes' not in geom:
geom['holes'] = []
geom['holes'].append(p['geometry'])
if not found:
polys.append(geom)
polys_with_holes = []
for p in polys:
geom = p['geometry']
holes = []
if 'holes' in p:
holes = [LinearRing(h) for h in p['holes']]
if 'Multi' in geom.type:
polys = []
for g in geom:
subholes = [h for h in holes if g.intersects(h)]
polys.append(Polygon(g, subholes))
poly = MultiPolygon(polys)
else:
poly = Polygon(geom, holes)
p['geometry'] = poly
polys_with_holes.append(p)
return path.clone(polys_with_holes, new_type=gv.Polygons)
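# Minimal usage sketch for paths_to_polys (illustrative only; the coordinates and the helper
# name below are made up). A drawn outline whose second ring lies inside the first is
# converted into a Polygons element with the inner ring treated as a hole.
def _example_paths_to_polys():
    outer = [(0, 0), (8, 0), (8, 8), (0, 8), (0, 0)]
    inner = [(2, 2), (6, 2), (6, 6), (2, 6), (2, 2)]
    drawn = GeoPath([outer, inner])
    return paths_to_polys(drawn)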
def poly_to_geopandas(polys, columns):
"""
Converts a GeoViews Path or Polygons element to a geopandas dataframe.
Parameters
----------
polys : gv.Path or gv.Polygons
GeoViews element to convert
columns : list(str)
Names of the annotation columns to add (initialized to empty strings)
Returns
-------
gdf : Geopandas dataframe
"""
rows = []
for g in polys.geom():
rows.append(dict({c: '' for c in columns}, geometry=g))
return gpd.GeoDataFrame(rows, columns=columns+['geometry'])
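# Minimal usage sketch for poly_to_geopandas (illustrative; the geometries and the 'Group'
# column name are made up, and it assumes the geoviews/shapely versions this module targets).
# The result has one row per geometry with the requested columns left blank for annotation.
def _example_poly_to_geopandas():
    squares = Polygons([[(0, 0), (4, 0), (4, 4), (0, 4)],
                        [(6, 0), (10, 0), (10, 4), (6, 4)]])
    return poly_to_geopandas(squares, ['Group'])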
def initialize_tools(plot, element):
"""
Initializes the Checkpoint, Restore and Clear tools by registering the plot's data source with them.
"""
cds = plot.handles['source']
checkpoint = plot.state.select(type=CheckpointTool)
restore = plot.state.select(type=RestoreTool)
clear = plot.state.select(type=ClearTool)
if checkpoint:
checkpoint[0].sources.append(cds)
if restore:
restore[0].sources.append(cds)
if clear:
clear[0].sources.append(cds)
class GeoAnnotator(param.Parameterized):
"""
Provides support for drawing polygons and points on top of a map.
"""
tile_url = param.String(default='http://c.tile.openstreetmap.org/{Z}/{X}/{Y}.png',
doc="URL for the tile source", precedence=-1)
extent = param.NumericTuple(default=(np.nan,)*4, doc="""
Initial extent if no data is provided.""", precedence=-1)
path_type = param.ClassSelector(default=Polygons, class_=Path, is_instance=False, doc="""
The element type to draw into.""")
polys = param.ClassSelector(class_=Path, precedence=-1, doc="""
Polygon or Path element to annotate""")
points = param.ClassSelector(class_=Points, precedence=-1, doc="""
Point element to annotate""")
height = param.Integer(default=500, doc="Height of the plot",
precedence=-1)
width = param.Integer(default=900, doc="Width of the plot",
precedence=-1)
def __init__(self, polys=None, points=None, crs=None, **params):
super(GeoAnnotator, self).__init__(**params)
plot_opts = dict(height=self.height, width=self.width)
self.tiles = WMTS(self.tile_url, extents=self.extent,
crs=ccrs.PlateCarree()).opts(plot=plot_opts)
polys = [] if polys is None else polys
points = [] if points is None else points
crs = ccrs.GOOGLE_MERCATOR if crs is None else crs
tools = [CheckpointTool(), RestoreTool(), ClearTool()]
opts = dict(tools=tools, finalize_hooks=[initialize_tools], color_index=None)
if not isinstance(polys, Path):
polys = self.path_type(polys, crs=crs).options(**opts)
self.polys = polys.options(**opts)
self.poly_stream = PolyDraw(source=self.polys, data={}, show_vertices=True)
self.vertex_stream = PolyEdit(source=self.polys, vertex_style={'nonselection_alpha': 0.5})
if isinstance(points, Points):
self.points = points
else:
self.points = Points(points, self.polys.kdims, crs=crs).options(**opts)
self.point_stream = PointDraw(source=self.points, drag=True, data={})
def pprint(self):
params = dict(self.get_param_values())
name = params.pop('name')
string = '%s\n%s\n\n' % (name, '-'*len(name))
for item in sorted(params.items()):
string += ' %s: %s\n' % (item)
print(string)
def map_view(self):
return self.tiles * self.polys * self.points
def panel(self):
return pn.Row(self.map_view())
class PointWidgetAnnotator(GeoAnnotator):
"""
Allows adding a group to the currently selected points using
a dropdown menu and add button. The current annotations are
reflected in a table.
Works by using a shared datasource on the Points and Table.
"""
add = param.Action(default=lambda self: self.add_group(), precedence=1,
doc="""Button triggering add action.""")
group = param.ObjectSelector()
column = param.String(default='Group', constant=True)
table_height = param.Integer(default=150, doc="Height of the table",
precedence=-1)
table_width = param.Integer(default=300, doc="Width of the table",
precedence=-1)
def __init__(self, groups, **params):
super(PointWidgetAnnotator, self).__init__(**params)
group_param = self.params('group')
group_param.objects = groups
group_param.default = groups[0]
self.point_sel_stream = Selection1D(source=self.points)
self._group_data = {g: [] for g in groups}
self.table_stream = Stream.define('TableUpdate')(transient=True)
def add_group(self, **kwargs):
new_index = self.point_sel_stream.index
for idx in new_index:
if idx not in self._group_data[self.group]:
self._group_data[self.group].append(idx)
for g, inds in self._group_data.items():
if g != self.group:
self._group_data[g] = [idx for idx in inds if idx not in new_index]
self.table_stream.trigger([self.table_stream])
def group_table(self):
plot = dict(width=self.table_width, height=self.table_height)
data = [(group, str(inds)) for group, inds in self._group_data.items()]
return Table(data, self.column, 'index').sort().opts(plot=plot)
def annotated_points(self):
element = self.point_stream.element
groups = []
for g, idx in self._group_data.items():
df = element.iloc[idx].dframe()
df[self.column] = g
groups.append(df)
data = pd.concat(groups).sort_values(self.column) if groups else []
return element.clone(data, vdims=self.column).opts(plot={'color_index': self.column},
style={'cmap': 'Category20'})
def map_view(self):
options = dict(tools=['box_select'], clone=False)
annotated = DynamicMap(self.annotated_points, streams=[self.table_stream])
return self.tiles * self.polys * self.points.options(**options) * annotated
def table_view(self):
return DynamicMap(self.group_table, streams=[self.table_stream])
def panel(self):
return pn.Row(self.param, self.map_view(), self.table_view())
class PolyAnnotator(GeoAnnotator):
"""
Allows drawing and annotating Polygons using a bokeh DataTable.
"""
poly_columns = param.List(default=['Group'], doc="""
Columns to annotate the Polygons with.""", precedence=-1)
vertex_columns = param.List(default=[], doc="""
Columns to annotate the polygon vertices with.""", precedence=-1)
table_height = param.Integer(default=150, doc="Height of the table",
precedence=-1)
table_width = param.Integer(default=400, doc="Width of the table",
precedence=-1)
def __init__(self, poly_data={}, **params):
super(PolyAnnotator, self).__init__(**params)
style = dict(editable=True)
plot = dict(width=self.table_width, height=self.table_height)
# Add annotation columns to poly data
for col in self.poly_columns:
if col not in self.polys:
self.polys = self.polys.add_dimension(col, 0, '', True)
self.poly_stream.source = self.polys
self.vertex_stream.source = self.polys
if len(self.polys):
poly_data = gv.project(self.polys).split()
self.poly_stream.event(data={kd.name: [p.dimension_values(kd) for p in poly_data]
for kd in self.polys.kdims})
poly_data = {c: self.polys.dimension_values(c, expanded=False) for c in self.poly_columns}
if len(set(len(v) for v in poly_data.values())) != 1:
raise ValueError('poly_columns must refer to value dimensions '
'which vary per path while at least one of '
'%s varies by vertex.' % self.poly_columns)
self.poly_table = Table(poly_data, self.poly_columns, []).opts(plot=plot, style=style)
self.poly_link = DataLink(source=self.polys, target=self.poly_table)
self.vertex_table = Table([], self.polys.kdims, self.vertex_columns).opts(plot=plot, style=style)
self.vertex_link = VertexTableLink(self.polys, self.vertex_table)
def map_view(self):
return (self.tiles * self.polys.options(clone=False, line_width=5) *
self.points.options(tools=['hover'], clone=False))
def table_view(self):
return pn.Tabs(('Polygons', self.poly_table), ('Vertices', self.vertex_table))
def panel(self):
return pn.Row(self.map_view(), self.table_view())
@param.output(path=hv.Path)
def path_output(self):
return self.poly_stream.element
class PointAnnotator(GeoAnnotator):
"""
Allows drawing and annotating Points using a bokeh DataTable.
"""
point_columns = param.List(default=['Size'], doc="""
Columns to annotate the Points with.""", precedence=-1)
table_height = param.Integer(default=150, doc="Height of the table",
precedence=-1)
table_width = param.Integer(default=400, doc="Width of the table",
precedence=-1)
def __init__(self, **params):
super(PointAnnotator, self).__init__(**params)
style = dict(editable=True)
plot = dict(width=self.table_width, height=self.table_height)
for col in self.point_columns:
if col not in self.points:
self.points = self.points.add_dimension(col, 0, None, True)
self.point_stream = PointDraw(source=self.points, data={})
projected = gv.project(self.points, projection=ccrs.PlateCarree())
self.point_table = Table(projected).opts(plot=plot, style=style)
self.point_link = PointTableLink(source=self.points, target=self.point_table)
def table_view(self):
return self.point_table
def panel(self):
return pn.Row(self.map_view(), self.table_view())
@param.output(points=gv.Points)
def point_output(self):
return self.point_stream.element
class PolyAndPointAnnotator(PolyAnnotator, PointAnnotator):
"""
Allows drawing and annotating Points and Polygons using a bokeh
DataTable.
"""
def table_view(self):
return pn.Tabs(('Polygons', self.poly_table), ('Vertices', self.vertex_table),
('Points', self.point_table))
class PolyExporter(param.Parameterized):
filename = param.String(default='')
path = param.ClassSelector(class_=Path, precedence=-1)
save = param.Action(default=lambda x: x._save())
def __init__(self, path, **params):
self._polys = paths_to_polys(path)
super(PolyExporter, self).__init__(path=path, **params)
def _save(self):
pass
def panel(self):
return pn.Row(self.param, self._polys.options(width=800, height=600))
options = Store.options('bokeh')
options.Points = Options('plot', padding=0.1)
options.Points = Options('style', size=10, line_color='black')
options.Path = Options('plot', padding=0.1)
options.Polygons = Options('plot', padding=0.1)
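# Minimal end-to-end sketch (illustrative; 'Group' and 'Size' are simply the default
# annotation columns declared above, and rendering still requires a live Bokeh/Panel
# environment).
def _example_annotator_app():
    annotator = PolyAndPointAnnotator(poly_columns=['Group'], point_columns=['Size'])
    return annotator.panel()  # display in a notebook or serve via Panel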
|
py | b409e4dd44233a5c14e13d2460cb244dda9146da | #==============================#
# System Import #
#==============================#
#==============================#
# Platform Import #
#==============================#
import numpy as np
import torch
import torch.nn as nn
#==============================#
# Class/Layer Part Import #
#==============================#
from layers.utils import *
from layers.basic import Embedding
from layers.reader import Reader
from layers.generator import Generator
from layers.searcher import Searcher as Searcher_
from layers.searcher import Debug, Prob
from layers.data_process import Padding
from layers.data_process import LargeVocabularyTrick
from layers.Loss import calcLoss
BOS, EOS = 1, 2
class LSTM2_MeanDiff_FlatToken(nn.Module):
BOS, EOS = 1, 2
def __init__(self, options, log, emb_tok_init = None):
super(LSTM2_MeanDiff_FlatToken, self).__init__()
self.log = log
self.options = options
self.LVT = None
self.Emb = Embedding(options['network']['Embedding'])
self.Reader = Reader(options['network']['Reader'])
self.Generator = Generator(options['network']['Generator'])
def setLVT(self, LVT):
self.LVT = LVT
def forward(self, source, len_source, target = None, len_target = None, bi_in = None, debugMode = False, sysGen = None):
source_emb = self.Emb(source)
h_e, source_mask = self.Reader(source_emb, len_source)
if target is not None:
target_emb = self.Emb(target)
outputs, states_next = self.Generator(self.LVT, (h_e, source_mask), [target_emb, len_target])
return outputs, states_next
if sysGen is None:
Searcher = Searcher_(self.options)
Searcher.setType(LSTM2_MeanDiff_FlatToken)
Searcher.Emb_Set(self.Emb)
Searcher.Generator_Set(self.Generator)
Searcher.LVT_Set(self.LVT)
return Searcher.search((h_e, source_mask), bi_in)
else:
Searcher = Prob(self.options)
Searcher.setType(LSTM2_MeanDiff_FlatToken)
Searcher.Emb_Set(self.Emb)
Searcher.Generator_Set(self.Generator)
Searcher.LVT_Set(self.LVT)
return Searcher.search((h_e, source_mask), sysGen)
def getLoss(self, source, target, sfeat, rfeat):
'''
Decoder input: the target sequence shifted right with BOS prepended;
Decoder output: the target sequence with EOS appended, sharpened against the LVT vocabulary.
'''
LVT = LargeVocabularyTrick(source, self.options)
self.setLVT(LVT)
padded_source, len_source = Padding.padding(source)
target = Padding.rightEOS(target)
padded_target, len_target = Padding.padding(target)
# ADD BOS Before the Sequence
shifted_target = Padding.rightShiftCut(padded_target, self.BOS)
sharped_target = LVT.sharp(target)
padded_sharped_target, len_target = Padding.padding(sharped_target)
outputs, states_pass = self.forward(padded_source, len_source, shifted_target, len_target)
preds = outputs[0]
loss = calcLoss(preds, padded_sharped_target, len_target)
return loss
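# Illustrative sketch of the teacher-forcing shift built in getLoss above (assumed semantics
# of Padding.rightEOS / Padding.rightShiftCut, using made-up tokens):
#   target         : w1 w2 w3
#   decoder input  : BOS w1 w2 w3    (shifted right, BOS prepended)
#   decoder output : w1 w2 w3 EOS    (EOS appended, then sharpened against the LVT vocabulary)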
def getAnswer(self, Answers, Vocab, doc):
text = translate([[tok[0], tok[1]] for tok in Answers], Vocab, doc)
parses = "No Parses In this Model."
return text, parses
def genSummary(self, source, Vocab, doc, bi_in = None):
# Processing Data
LVT = LargeVocabularyTrick(source, self.options)
self.setLVT(LVT)
padded_source, len_source = Padding.padding(source)
Answers = self.forward(padded_source, len_source, bi_in = bi_in)
Answers = sorted(Answers, key = lambda x:x[0]/len(x[1]))
textAnswers = []
parseAnswers = []
N = len(Answers)
for i in range(N):
text, parse = self.getAnswer(Answers[i][1][1:-1], Vocab, doc)
textAnswers.append(text)
parseAnswers.append(parse)
return [textAnswers, parseAnswers]
def getProb(self, source, Vocab, sysGen):
# Processing Data
LVT = ActionPool(source, self.options)
self.setLVT(LVT)
target = OpMapping.OPPSeqs2OPISeqs([sysGen])
sharped_target = LVT.sharp(target)[0]
padded_source, len_source = Padding.padding(source)
Answer = self.forward(padded_source, len_source, sysGen = sharped_target)
return Answer
def debug(self, source, target, sfeat, rfeat):
pass
@staticmethod
def state_init():
'''
ph_d, ch_d
'''
return [None, None]
@staticmethod
def state_pack(s):
return [numpy2torch(s[0]), numpy2torch(s[1])]
@staticmethod
def state_unpack(s):
return [torch2numpy(s[0]), torch2numpy(s[1])]
@staticmethod
def state_process(state_old, state_new, action):
return state_new
@staticmethod
def cond(preds, state_pass, conditions = None):
return np.asarray(range(conditions['maxSize']))
@staticmethod
def getPred(outputs):
return outputs[0]
@staticmethod
def getAtt(outputs):
return outputs[1]
@staticmethod
def lastToken(seq, conditions):
return seq[-1][0]
@staticmethod
def checkEnd(Sequence, State_Pass):
return (Sequence[-1][0] == EOS)
@staticmethod
def getInputs(Token, Lengths, Emb):
target_emb = Emb(Token)
len_target = Lengths
return [target_emb, len_target] |
py | b409e5bb68ca32443a795a5b0390529690940f61 | from random import randint
class Proceso:
def __init__(self,min_tick,max_tick,min_llegada,max_llegada):
self.color_names = ['\033[31m','\033[32m','\033[33m','\033[34m','\033[36m','\033[37m','\033[91m','\033[92m','\033[93m','\033[94m','\033[95m']
self.color = self.color_names[randint(0,len(self.color_names)-1)]
self.nombre = self.color+"["+str(randint(10,99))+"]\033[0m"
self.t = randint(min_tick,max_tick) # Assign an execution time t within the given range of ticks
self.llegada = randint(min_llegada,max_llegada) # Assign a random arrival time within the given range of quantums
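if __name__ == '__main__':
    # Example with illustrative values: burst time drawn from 1-10 ticks, arrival within
    # the first 5 quantums.
    p = Proceso(1, 10, 0, 5)
    print(p.nombre, p.t, p.llegada)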
|
py | b409e5f3e8085434e9bf04633e96d7ffc46cd006 | # coding: utf-8
import pprint
import re
import six
class ListDatabaseRolesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'role_name': 'str',
'db_name': 'str',
'offset': 'int',
'limit': 'int'
}
attribute_map = {
'instance_id': 'instance_id',
'role_name': 'role_name',
'db_name': 'db_name',
'offset': 'offset',
'limit': 'limit'
}
def __init__(self, instance_id=None, role_name=None, db_name=None, offset=None, limit=None):
"""ListDatabaseRolesRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._role_name = None
self._db_name = None
self._offset = None
self._limit = None
self.discriminator = None
self.instance_id = instance_id
if role_name is not None:
self.role_name = role_name
if db_name is not None:
self.db_name = db_name
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
@property
def instance_id(self):
"""Gets the instance_id of this ListDatabaseRolesRequest.
:return: The instance_id of this ListDatabaseRolesRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListDatabaseRolesRequest.
:param instance_id: The instance_id of this ListDatabaseRolesRequest.
:type: str
"""
self._instance_id = instance_id
@property
def role_name(self):
"""Gets the role_name of this ListDatabaseRolesRequest.
:return: The role_name of this ListDatabaseRolesRequest.
:rtype: str
"""
return self._role_name
@role_name.setter
def role_name(self, role_name):
"""Sets the role_name of this ListDatabaseRolesRequest.
:param role_name: The role_name of this ListDatabaseRolesRequest.
:type: str
"""
self._role_name = role_name
@property
def db_name(self):
"""Gets the db_name of this ListDatabaseRolesRequest.
:return: The db_name of this ListDatabaseRolesRequest.
:rtype: str
"""
return self._db_name
@db_name.setter
def db_name(self, db_name):
"""Sets the db_name of this ListDatabaseRolesRequest.
:param db_name: The db_name of this ListDatabaseRolesRequest.
:type: str
"""
self._db_name = db_name
@property
def offset(self):
"""Gets the offset of this ListDatabaseRolesRequest.
:return: The offset of this ListDatabaseRolesRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListDatabaseRolesRequest.
:param offset: The offset of this ListDatabaseRolesRequest.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListDatabaseRolesRequest.
:return: The limit of this ListDatabaseRolesRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListDatabaseRolesRequest.
:param limit: The limit of this ListDatabaseRolesRequest.
:type: int
"""
self._limit = limit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListDatabaseRolesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
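# Minimal usage sketch (illustrative; the instance id below is a placeholder value):
def _example_list_database_roles_request():
    request = ListDatabaseRolesRequest(instance_id='example-instance-id', offset=0, limit=100)
    return request.to_dict()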
|
py | b409e658fa50633283f4e83678ad54c0c2787b54 | from setuptools import setup,find_packages
from os import path
# Pull the current README
cwd = path.abspath(path.dirname(__file__))
with open(path.join(cwd, 'README.md'), 'r') as fh:
long_description = fh.read()
setup(
name='word_tools',
packages=find_packages(), # Automatically discover every package directory to include in the distribution
version='0.2.0',
author='ncdulo',
license='MIT',
description='Utilities for performing actions on words, or collections of words.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/ncdulo/word_tools',
keywords='word tools dictionary urbandictionary utility cli module',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing',
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
],
python_requires='>=3.6',
py_modules=['cli'],
install_requires=[
'requests',
'click',
'beautifulsoup4',
'pymediawiki',
],
entry_points = {
'console_scripts':
['wt=word_tools.__main__:main',
],
}
)
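# Note: after installing (e.g. `pip install .`), the console_scripts entry point above
# exposes a `wt` command that runs word_tools.__main__:main.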
|
py | b409e74bf283c28126534ca3892ce0459b0054f5 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-22 23:33
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('images', '0005_auto_20180122_1732'),
]
operations = [
migrations.AlterField(
model_name='image',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='images', to=settings.AUTH_USER_MODEL),
),
]
|
py | b409e79d85badc6c389ddbf7edee0b66a6b0b4d3 | import os
import yaml
default_config_yaml = """
# Metadata
use_exif_size: yes
unknown_camera_models_are_different: no # Treat images from unknown camera models as coming from different cameras
default_focal_prior: 0.85
# Params for features
feature_type: HAHOG # Feature type (AKAZE, SURF, SIFT, HAHOG, ORB)
feature_root: 1 # If 1, apply square root mapping to features
feature_min_frames: 4000 # If fewer frames are detected, sift_peak_threshold/surf_hessian_threshold is reduced.
feature_min_frames_panorama: 16000 # Same as above but for panorama images
feature_process_size: 2048 # Resize the image if its size is larger than specified. Set to -1 for original size
feature_process_size_panorama: 4096 # Same as above but for panorama images
feature_use_adaptive_suppression: no
features_bake_segmentation: no # Bake segmentation info (class and instance) into the feature data, so it is done only once at extraction time.
# Params for SIFT
sift_peak_threshold: 0.1 # Smaller value -> more features
sift_edge_threshold: 10 # See OpenCV doc
# Params for SURF
surf_hessian_threshold: 3000 # Smaller value -> more features
surf_n_octaves: 4 # See OpenCV doc
surf_n_octavelayers: 2 # See OpenCV doc
surf_upright: 0 # See OpenCV doc
# Params for AKAZE (See details in lib/src/third_party/akaze/AKAZEConfig.h)
akaze_omax: 4 # Maximum octave evolution of the image 2^sigma (coarsest scale sigma units)
akaze_dthreshold: 0.001 # Detector response threshold to accept point
akaze_descriptor: MSURF # Feature type
akaze_descriptor_size: 0 # Size of the descriptor in bits. 0->Full size
akaze_descriptor_channels: 3 # Number of feature channels (1,2,3)
akaze_kcontrast_percentile: 0.7
akaze_use_isotropic_diffusion: no
# Params for HAHOG
hahog_peak_threshold: 0.00001
hahog_edge_threshold: 10
hahog_normalize_to_uchar: yes
# Params for general matching
lowes_ratio: 0.8 # Ratio test for matches
matcher_type: FLANN # FLANN, BRUTEFORCE, or WORDS
symmetric_matching: yes # Match symmetricly or one-way
# Params for FLANN matching
flann_algorithm: KMEANS # Algorithm type (KMEANS, KDTREE)
flann_branching: 8 # See OpenCV doc
flann_iterations: 10 # See OpenCV doc
flann_tree: 8 # See OpenCV doc
flann_checks: 20 # Smaller -> Faster (but might lose good matches)
# Params for BoW matching
bow_file: bow_hahog_root_uchar_10000.npz
bow_words_to_match: 50 # Number of words to explore per feature.
bow_num_checks: 20 # Number of matching features to check.
bow_matcher_type: FLANN # Matcher type to assign words to features
# Params for VLAD matching
vlad_file: bow_hahog_root_uchar_64.npz
# Params for matching
matching_gps_distance: 150 # Maximum GPS distance between two images for matching
matching_gps_neighbors: 0 # Number of images to match selected by GPS distance. Set to 0 to use no limit (or disable if matching_gps_distance is also 0)
matching_time_neighbors: 0 # Number of images to match selected by time taken. Set to 0 to disable
matching_order_neighbors: 0 # Number of images to match selected by image name. Set to 0 to disable
matching_bow_neighbors: 0 # Number of images to match selected by BoW distance. Set to 0 to disable
matching_bow_gps_distance: 0 # Maximum GPS distance for preempting images before using selection by BoW distance. Set to 0 to disable
matching_bow_gps_neighbors: 0 # Number of images (selected by GPS distance) to preempt before using selection by BoW distance. Set to 0 to use no limit (or disable if matching_bow_gps_distance is also 0)
matching_bow_other_cameras: False # If True, BoW image selection will use N neighbors from the same camera + N neighbors from any different camera.
matching_vlad_neighbors: 0 # Number of images to match selected by VLAD distance. Set to 0 to disable
matching_vlad_gps_distance: 0 # Maximum GPS distance for preempting images before using selection by VLAD distance. Set to 0 to disable
matching_vlad_gps_neighbors: 0 # Number of images (selected by GPS distance) to preempt before using selection by VLAD distance. Set to 0 to use no limit (or disable if matching_vlad_gps_distance is also 0)
matching_vlad_other_cameras: False # If True, VLAD image selection will use N neighbors from the same camera + N neighbors from any different camera.
matching_use_filters: False # If True, removes static matches using ad-hoc heuristics
# Params for geometric estimation
robust_matching_threshold: 0.004 # Outlier threshold for fundamental matrix estimation as portion of image width
robust_matching_calib_threshold: 0.004 # Outlier threshold for essential matrix estimation during matching in radians
robust_matching_min_match: 20 # Minimum number of matches to accept matches between two images
five_point_algo_threshold: 0.004 # Outlier threshold for essential matrix estimation during incremental reconstruction in radians
five_point_algo_min_inliers: 20 # Minimum number of inliers for considering a two view reconstruction valid
five_point_refine_match_iterations: 10 # Number of LM iterations to run when refining relative pose during matching
five_point_refine_rec_iterations: 1000 # Number of LM iterations to run when refining relative pose during reconstruction
triangulation_threshold: 0.006 # Outlier threshold for accepting a triangulated point in radians
triangulation_min_ray_angle: 1.0 # Minimum angle between views to accept a triangulated point
triangulation_type: FULL # Triangulation type: either considering all rays (FULL) or using a RANSAC variant (ROBUST)
resection_threshold: 0.004 # Outlier threshold for resection in radians
resection_min_inliers: 10 # Minimum number of resection inliers to accept it
# Params for track creation
min_track_length: 2 # Minimum number of features/images per track
# Params for bundle adjustment
loss_function: SoftLOneLoss # Loss function for the ceres problem (see: http://ceres-solver.org/modeling.html#lossfunction)
loss_function_threshold: 1 # Threshold on the squared residuals. Usually cost is quadratic for smaller residuals and sub-quadratic above.
reprojection_error_sd: 0.004 # The standard deviation of the reprojection error
exif_focal_sd: 0.01 # The standard deviation of the exif focal length in log-scale
principal_point_sd: 0.01 # The standard deviation of the principal point coordinates
radial_distortion_k1_sd: 0.01 # The standard deviation of the first radial distortion parameter
radial_distortion_k2_sd: 0.01 # The standard deviation of the second radial distortion parameter
radial_distortion_k3_sd: 0.01 # The standard deviation of the third radial distortion parameter
radial_distortion_k4_sd: 0.01 # The standard deviation of the fourth radial distortion parameter
tangential_distortion_p1_sd: 0.01 # The standard deviation of the first tangential distortion parameter
tangential_distortion_p2_sd: 0.01 # The standard deviation of the second tangential distortion parameter
rig_translation_sd: 0.1 # The standard deviation of the rig translation
rig_rotation_sd: 0.1 # The standard deviation of the rig rotation
bundle_outlier_filtering_type: FIXED # Type of threshold for filtering outliers: either a fixed value (FIXED) or based on the actual distribution (AUTO)
bundle_outlier_auto_ratio: 3.0 # For AUTO filtering type, projections with larger reprojection than ratio-times-mean, are removed
bundle_outlier_fixed_threshold: 0.006 # For FIXED filtering type, projections with larger reprojection error after bundle adjustment are removed
optimize_camera_parameters: yes # Optimize internal camera parameters during bundle
bundle_max_iterations: 100 # Maximum optimizer iterations.
retriangulation: yes # Retriangulate all points from time to time
retriangulation_ratio: 1.2 # Retriangulate when the number of points grows by this ratio
bundle_analytic_derivatives: yes # Use analytic derivatives or auto-differentiated ones during bundle adjustment
bundle_interval: 999999 # Bundle after adding 'bundle_interval' cameras
bundle_new_points_ratio: 1.2 # Bundle when the number of points grows by this ratio
local_bundle_radius: 3 # Max image graph distance for images to be included in local bundle adjustment
local_bundle_min_common_points: 20 # Minimum number of common points between images to be considered neighbors
local_bundle_max_shots: 30 # Max number of shots to optimize during local bundle adjustment
save_partial_reconstructions: no # Save reconstructions at every iteration
# Params for GPS alignment
use_altitude_tag: no # Use or ignore EXIF altitude tag
align_method: orientation_prior # orientation_prior or naive
align_orientation_prior: horizontal # horizontal, vertical or no_roll
bundle_use_gps: yes # Enforce GPS position in bundle adjustment
bundle_use_gcp: no # Enforce Ground Control Point position in bundle adjustment
# Params for rigs
rig_calibration_subset_size: 15
# Params for navigation graph
nav_min_distance: 0.01 # Minimum distance for a possible edge between two nodes
nav_step_pref_distance: 6 # Preferred distance between camera centers
nav_step_max_distance: 20 # Maximum distance for a possible step edge between two nodes
nav_turn_max_distance: 15 # Maximum distance for a possible turn edge between two nodes
nav_step_forward_view_threshold: 15 # Maximum difference of angles in degrees between viewing directions for forward steps
nav_step_view_threshold: 30 # Maximum difference of angles in degrees between viewing directions for other steps
nav_step_drift_threshold: 36 # Maximum motion drift with respect to step directions for steps in degrees
nav_turn_view_threshold: 40 # Maximum difference of angles in degrees with respect to turn directions
nav_vertical_threshold: 20 # Maximum vertical angle difference in motion and viewing direction in degrees
nav_rotation_threshold: 30 # Maximum general rotation in degrees between cameras for steps
# Params for image undistortion
undistorted_image_format: jpg # Format in which to save the undistorted images
undistorted_image_max_size: 100000 # Max width and height of the undistorted image
# Params for depth estimation
depthmap_method: PATCH_MATCH_SAMPLE # Raw depthmap computation algorithm (PATCH_MATCH, BRUTE_FORCE, PATCH_MATCH_SAMPLE)
depthmap_resolution: 640 # Resolution of the depth maps
depthmap_num_neighbors: 10 # Number of neighboring views
depthmap_num_matching_views: 6 # Number of neighboring views used for each depthmaps
depthmap_min_depth: 0 # Minimum depth in meters. Set to 0 to auto-infer from the reconstruction.
depthmap_max_depth: 0 # Maximum depth in meters. Set to 0 to auto-infer from the reconstruction.
depthmap_patchmatch_iterations: 3 # Number of PatchMatch iterations to run
depthmap_patch_size: 7 # Size of the correlation patch
depthmap_min_patch_sd: 1.0 # Patches with lower standard deviation are ignored
depthmap_min_correlation_score: 0.1 # Minimum correlation score to accept a depth value
depthmap_same_depth_threshold: 0.01 # Threshold to measure depth closeness
depthmap_min_consistent_views: 3 # Min number of views that should reconstruct a point for it to be valid
depthmap_save_debug_files: no # Save debug files with partial reconstruction results
# Other params
processes: 1 # Number of threads to use
read_processes: 4 # When processes > 1, number of threads used for reading images
# Params for submodel split and merge
submodel_size: 80 # Average number of images per submodel
submodel_overlap: 30.0 # Radius of the overlapping region between submodels
submodels_relpath: "submodels" # Relative path to the submodels directory
submodel_relpath_template: "submodels/submodel_%04d" # Template to generate the relative path to a submodel directory
submodel_images_relpath_template: "submodels/submodel_%04d/images" # Template to generate the relative path to a submodel images directory
"""
def default_config():
"""Return default configuration"""
return yaml.safe_load(default_config_yaml)
def load_config(filepath):
"""DEPRECATED: Load config from a config.yaml filepath"""
if not os.path.isfile(filepath):
return default_config()
with open(filepath) as fin:
return load_config_from_fileobject(fin)
def load_config_from_fileobject(f):
"""Load config from a config.yaml fileobject"""
config = default_config()
new_config = yaml.safe_load(f)
if new_config:
for k, v in new_config.items():
config[k] = v
return config
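# Hedged usage sketch (added for illustration, not part of the upstream module): it shows how
# the defaults above get merged with a user override, the way a user config.yaml would be.
if __name__ == "__main__":
    from io import StringIO

    overrides = StringIO("feature_type: SIFT\nfeature_min_frames: 8000\n")
    config = load_config_from_fileobject(overrides)
    print(config["feature_type"])        # SIFT (overridden)
    print(config["feature_min_frames"])  # 8000 (overridden)
    print(config["matcher_type"])        # FLANN (default kept)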
|
py | b409e9a18fd22a8e71c2b6cb5ed8e405afbe2ffc | # --------------------------------------------------------
# Pytorch multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.utils.net_utils import weights_normal_init, save_net, load_net, \
adjust_learning_rate, save_checkpoint, clip_gradient
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res101',
default='vgg16', type=str)
parser.add_argument('--start_epoch', dest='start_epoch',
help='starting epoch',
default=1, type=int)
parser.add_argument('--epochs', dest='max_epochs',
help='number of epochs to train',
default=20, type=int)
parser.add_argument('--disp_interval', dest='disp_interval',
help='number of iterations to display',
default=100, type=int)
parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                      help='number of iterations between saving checkpoints',
default=10000, type=int)
parser.add_argument('--save_dir', dest='save_dir',
help='directory to save models', default="models",
type=str)
parser.add_argument('--nw', dest='num_workers',
help='number of workers to load data',
default=0, type=int)
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--ls', dest='large_scale',
                      help='whether to use large image scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--bs', dest='batch_size',
help='batch_size',
default=1, type=int)
parser.add_argument('--cag', dest='class_agnostic',
help='whether to perform class_agnostic bbox regression',
action='store_true')
# config optimization
parser.add_argument('--o', dest='optimizer',
help='training optimizer',
default="sgd", type=str)
parser.add_argument('--lr', dest='lr',
help='starting learning rate',
default=0.001, type=float)
"""
parser.add_argument('--lr_decay_step', dest='lr_decay_step',
help='step to do learning rate decay, unit is epoch',
default=5, type=int)
"""
parser.add_argument('--lr_decay_step', dest='lr_decay_step',
help='step to do learning rate decay, unit is iterations',
default=50000, type=int)
parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
help='learning rate decay ratio',
default=0.1, type=float)
# set training session
parser.add_argument('--s', dest='session',
help='training session',
default=1, type=int)
# resume trained model
parser.add_argument('--r', dest='resume',
help='resume checkpoint or not',
default=False, type=bool)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load model',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load model',
default=0, type=int)
# log and display
parser.add_argument('--use_tfb', dest='use_tfboard',
help='whether use tensorboard',
action='store_true')
args = parser.parse_args()
return args
class sampler(Sampler):
def __init__(self, train_size, batch_size):
self.num_data = train_size
self.num_per_batch = int(train_size / batch_size)
self.batch_size = batch_size
self.range = torch.arange(0,batch_size).view(1, batch_size).long()
self.leftover_flag = False
if train_size % batch_size:
self.leftover = torch.arange(self.num_per_batch*batch_size, train_size).long()
self.leftover_flag = True
def __iter__(self):
rand_num = torch.randperm(self.num_per_batch).view(-1,1) * self.batch_size
self.rand_num = rand_num.expand(self.num_per_batch, self.batch_size) + self.range
self.rand_num_view = self.rand_num.view(-1)
if self.leftover_flag:
self.rand_num_view = torch.cat((self.rand_num_view, self.leftover),0)
return iter(self.rand_num_view)
def __len__(self):
return self.num_data
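# Added note (illustration only, not executed): with train_size=10 and batch_size=3 the sampler
# above shuffles whole batches while keeping indices within a batch contiguous, e.g.
#   list(iter(sampler(10, 3))) -> [3, 4, 5, 0, 1, 2, 6, 7, 8, 9]
# and the leftover tail index 9 is always appended unshuffled at the end.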
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "species":
args.imdb_name = "species_2007_trainval+species_2007_test"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '30']
elif args.dataset == "vg":
# train sizes: train, smalltrain, minitrain
# train scale: ['150-50-20', '150-50-50', '500-150-80', '750-250-150', '1750-700-450', '1600-400-20']
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "city":
args.imdb_name = "city_2007_trainval"
args.imdbval_name = "city_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "kitti-voc":
args.imdb_name = "kitti-voc_2007_trainval"
args.imdbval_name = "kitti-voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "caltech":
args.imdb_name = "coco_2014_train"
args.imdbval_name = "coco_2014_cis_test"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '50']
elif args.dataset == "pascal-voc":
args.imdb_name = "pascal-voc-2007_2007_trainval+pascal-voc-2012_2012_trainval"
args.imdbval_name = "..."
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "sim10k":
args.imdb_name = "sim10k_2012_trainval"
args.imdbval_name = "sim10k_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
elif args.dataset == "caltech-voc":
args.imdb_name = "caltech-voc_2007_trainval"
args.imdbval_name = "caltech-voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]', 'MAX_NUM_GT_BOXES', '20']
args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
np.random.seed(cfg.RNG_SEED)
#torch.backends.cudnn.benchmark = True
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# train set
  # -- Note: Use the validation set and disable flipping to enable faster loading.
cfg.TRAIN.USE_FLIPPED = True
cfg.USE_GPU_NMS = args.cuda
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
train_size = len(roidb)
print('{:d} roidb entries'.format(len(roidb)))
output_dir = args.save_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(output_dir):
os.makedirs(output_dir)
sampler_batch = sampler(train_size, args.batch_size)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, args.batch_size, \
imdb.num_classes, training=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
sampler=sampler_batch, num_workers=args.num_workers)
  # initialize the tensor holders here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
  # initialize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes, pretrained=True, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=True, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes, 152, pretrained=True, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
lr = cfg.TRAIN.LEARNING_RATE
lr = args.lr
#tr_momentum = cfg.TRAIN.MOMENTUM
#tr_momentum = args.momentum
params = []
for key, value in dict(fasterRCNN.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
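        # Added note: biases get a doubled learning rate when cfg.TRAIN.DOUBLE_BIAS is set,
        # and the `A and B or 0` idiom applies weight decay to biases only if
        # cfg.TRAIN.BIAS_DECAY is truthy.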
params += [{'params':[value],'lr':lr*(cfg.TRAIN.DOUBLE_BIAS + 1), \
'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
else:
params += [{'params':[value],'lr':lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
if args.optimizer == "adam":
lr = lr * 0.1
optimizer = torch.optim.Adam(params)
elif args.optimizer == "sgd":
optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
if args.cuda:
fasterRCNN.cuda()
if args.resume:
load_name = os.path.join(output_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
print("loading checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
args.session = checkpoint['session']
args.start_epoch = checkpoint['epoch']
fasterRCNN.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr = optimizer.param_groups[0]['lr']
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print("loaded checkpoint %s" % (load_name))
if args.mGPUs:
fasterRCNN = nn.DataParallel(fasterRCNN)
iters_per_epoch = int(train_size / args.batch_size)
if args.use_tfboard:
from tensorboardX import SummaryWriter
logger = SummaryWriter("logs")
for epoch in range(args.start_epoch, args.max_epochs + 1):
# setting to train mode
fasterRCNN.train()
loss_temp = 0
start = time.time()
if epoch % (args.lr_decay_step + 1) == 0:
adjust_learning_rate(optimizer, args.lr_decay_gamma)
lr *= args.lr_decay_gamma
data_iter = iter(dataloader)
for step in range(iters_per_epoch):
data = next(data_iter)
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info.data.resize_(data[1].size()).copy_(data[1])
gt_boxes.data.resize_(data[2].size()).copy_(data[2])
num_boxes.data.resize_(data[3].size()).copy_(data[3])
fasterRCNN.zero_grad()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label, _1, _2 = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
+ RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
loss_temp += loss.item()
# backward
optimizer.zero_grad()
loss.backward()
if args.net == "vgg16":
clip_gradient(fasterRCNN, 10.)
optimizer.step()
if step % args.disp_interval == 0:
end = time.time()
if step > 0:
loss_temp /= (args.disp_interval + 1)
if args.mGPUs:
loss_rpn_cls = rpn_loss_cls.mean().item()
loss_rpn_box = rpn_loss_box.mean().item()
loss_rcnn_cls = RCNN_loss_cls.mean().item()
loss_rcnn_box = RCNN_loss_bbox.mean().item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.data.numel() - fg_cnt
else:
loss_rpn_cls = rpn_loss_cls.item()
loss_rpn_box = rpn_loss_box.item()
loss_rcnn_cls = RCNN_loss_cls.item()
loss_rcnn_box = RCNN_loss_bbox.item()
fg_cnt = torch.sum(rois_label.data.ne(0))
bg_cnt = rois_label.data.numel() - fg_cnt
print("[session %d][epoch %2d][iter %4d/%4d] loss: %.4f, lr: %.2e" \
% (args.session, epoch, step, iters_per_epoch, loss_temp, lr))
print("\t\t\tfg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end-start))
print("\t\t\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box %.4f" \
% (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
if args.use_tfboard:
info = {
'loss': loss_temp,
'loss_rpn_cls': loss_rpn_cls,
'loss_rpn_box': loss_rpn_box,
'loss_rcnn_cls': loss_rcnn_cls,
'loss_rcnn_box': loss_rcnn_box
}
logger.add_scalars("logs_s_{}/losses".format(args.session), info, (epoch - 1) * iters_per_epoch + step)
loss_temp = 0
start = time.time()
save_name = os.path.join(output_dir, 'faster_rcnn_{}_{}_{}.pth'.format(args.session, epoch, step))
save_checkpoint({
'session': args.session,
'epoch': epoch + 1,
'model': fasterRCNN.module.state_dict() if args.mGPUs else fasterRCNN.state_dict(),
'optimizer': optimizer.state_dict(),
'pooling_mode': cfg.POOLING_MODE,
'class_agnostic': args.class_agnostic,
}, save_name)
print('save model: {}'.format(save_name))
if args.use_tfboard:
logger.close()
|
py | b409eb5a85751104bb60e5ed10911948e0618744 | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 5, transform = "Fisher", sigma = 0.0, exog_count = 100, ar_order = 12); |
py | b409ebcd474abbfe748edc1d2a5159a0358c7b0a | """
Security constrained unit commitment considering line failures
1) Failure of transmission lines is depicted as scenarios
"""
|
py | b409ed8a30dda4407d22d605512efd00cf55c2af | from datetime import datetime
from unittest import TestCase
import re
import pytest
from conda.base.context import context, conda_tests_ctxt_mgmt_def_pol
from conda.common.compat import on_win
from conda.common.io import env_var
from .test_create import Commands, package_is_installed, get_conda_list_tuple, \
make_temp_env, run_command
@pytest.mark.integration
class PriorityIntegrationTests(TestCase):
def test_channel_order_channel_priority_true(self):
# This is broken, make_temp_env will reset the context. We get away with it, but really
# we need a function that does both these at the same time.
with env_var("CONDA_PINNED_PACKAGES", "python=3.6", stack_callback=conda_tests_ctxt_mgmt_def_pol):
with make_temp_env("pycosat==0.6.2") as prefix:
assert package_is_installed(prefix, 'python=3.6')
assert package_is_installed(prefix, 'pycosat')
# add conda-forge channel
o, e, _ = run_command(Commands.CONFIG, prefix, "--prepend", "channels", "conda-forge", '--json')
assert context.channels == ("conda-forge", "defaults"), o + e
# update --all
update_stdout, _, _ = run_command(Commands.UPDATE, prefix, '--all')
# this assertion works with the pinned_packages config to make sure
# conda update --all still respects the pinned python version
assert package_is_installed(prefix, 'python=3.6')
# pycosat should be in the SUPERSEDED list
# after the 4.4 solver work, looks like it's in the DOWNGRADED list
                # This language needs to be changed anyway here.
# For packages that CHANGE because they're being moved to a higher-priority channel
# the message should be
#
# The following packages will be UPDATED to a higher-priority channel:
#
installed_str, x = update_stdout.split('UPDATED')
assert re.search(r'pkgs/main::pycosat-0.6.2-py36h[^\s]+ --> conda-forge::pycosat', x)
# python sys.version should show conda-forge python
python_tuple = get_conda_list_tuple(prefix, "python")
assert python_tuple[3] == 'conda-forge'
# conda list should show pycosat coming from conda-forge
pycosat_tuple = get_conda_list_tuple(prefix, "pycosat")
assert pycosat_tuple[3] == 'conda-forge'
def test_channel_priority_update(self):
"""
This case will fail now
"""
with make_temp_env("python=3.6.5", "pycosat") as prefix:
assert package_is_installed(prefix, 'python')
# add conda-forge channel
o, e, _ = run_command(Commands.CONFIG, prefix, "--prepend", "channels", "conda-forge", '--json')
assert context.channels == ("conda-forge", "defaults"), o+e
# update python
update_stdout, _, _ = run_command(Commands.UPDATE, prefix, 'python')
# pycosat should be in the SUPERSEDED list
            superseded_split = update_stdout.split('UPDATED')
            assert len(superseded_split) == 2
            assert 'conda-forge' in superseded_split[1]
# python sys.version should show conda-forge python
python_tuple = get_conda_list_tuple(prefix, "python")
assert python_tuple[3] == 'conda-forge'
|
py | b409edfae4ae07e0b55d6519834567f3d678cdec | import config as c
import discord
from discord.ext import commands
async def user_is_developer(ctx):
is_owner = await ctx.bot.is_owner(ctx.author)
if is_owner:
return True
else:
return ctx.message.author.id in c.dev_id
#is_dev = await ctx.message.author.id in c.dev_id
#if is_dev:
# return True
#else:
# return False
def is_dev():
async def checker(ctx):
return await user_is_developer(ctx)
return commands.check(checker)
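# Hedged usage sketch (assumes a commands.Bot instance named `bot` created in the main bot
# module; the command below is made up for illustration and is not part of this file):
#
#   @bot.command(name="shutdown")
#   @is_dev()
#   async def shutdown(ctx):
#       await ctx.send("Shutting down.")
#       await bot.close()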
|
py | b409edfaf1c26448ffdfa7102114e30c137d3ed3 | import torch
import torch.nn as nn
import torchvision.models as models
from model.HolisticAttention import HA
from model.ResNet import B2_ResNet
class BasicConv2d(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_planes, out_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False)
self.bn = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class RFB(nn.Module):
# RFB-like multi-scale module
def __init__(self, in_channel, out_channel):
super(RFB, self).__init__()
self.relu = nn.ReLU(True)
self.branch0 = nn.Sequential(
BasicConv2d(in_channel, out_channel, 1),
)
self.branch1 = nn.Sequential(
BasicConv2d(in_channel, out_channel, 1),
BasicConv2d(out_channel, out_channel, kernel_size=(1, 3), padding=(0, 1)),
BasicConv2d(out_channel, out_channel, kernel_size=(3, 1), padding=(1, 0)),
BasicConv2d(out_channel, out_channel, 3, padding=3, dilation=3)
)
self.branch2 = nn.Sequential(
BasicConv2d(in_channel, out_channel, 1),
BasicConv2d(out_channel, out_channel, kernel_size=(1, 5), padding=(0, 2)),
BasicConv2d(out_channel, out_channel, kernel_size=(5, 1), padding=(2, 0)),
BasicConv2d(out_channel, out_channel, 3, padding=5, dilation=5)
)
self.branch3 = nn.Sequential(
BasicConv2d(in_channel, out_channel, 1),
BasicConv2d(out_channel, out_channel, kernel_size=(1, 7), padding=(0, 3)),
BasicConv2d(out_channel, out_channel, kernel_size=(7, 1), padding=(3, 0)),
BasicConv2d(out_channel, out_channel, 3, padding=7, dilation=7)
)
self.conv_cat = BasicConv2d(4*out_channel, out_channel, 3, padding=1)
self.conv_res = BasicConv2d(in_channel, out_channel, 1)
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
x_cat = self.conv_cat(torch.cat((x0, x1, x2, x3), 1))
x = self.relu(x_cat + self.conv_res(x))
return x
class aggregation(nn.Module):
    # dense aggregation; it can be replaced by other aggregation models, such as DSS, Amulet, and so on.
# used after MSF
def __init__(self, channel):
super(aggregation, self).__init__()
self.relu = nn.ReLU(True)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_upsample1 = BasicConv2d(channel, channel, 3, padding=1)
self.conv_upsample2 = BasicConv2d(channel, channel, 3, padding=1)
self.conv_upsample3 = BasicConv2d(channel, channel, 3, padding=1)
self.conv_upsample4 = BasicConv2d(channel, channel, 3, padding=1)
self.conv_upsample5 = BasicConv2d(2*channel, 2*channel, 3, padding=1)
self.conv_concat2 = BasicConv2d(2*channel, 2*channel, 3, padding=1)
self.conv_concat3 = BasicConv2d(3*channel, 3*channel, 3, padding=1)
self.conv4 = BasicConv2d(3*channel, 3*channel, 3, padding=1)
self.conv5 = nn.Conv2d(3*channel, 1, 1)
def forward(self, x1, x2, x3):
x1_1 = x1
x2_1 = self.conv_upsample1(self.upsample(x1)) * x2
x3_1 = self.conv_upsample2(self.upsample(self.upsample(x1))) \
* self.conv_upsample3(self.upsample(x2)) * x3
x2_2 = torch.cat((x2_1, self.conv_upsample4(self.upsample(x1_1))), 1)
x2_2 = self.conv_concat2(x2_2)
x3_2 = torch.cat((x3_1, self.conv_upsample5(self.upsample(x2_2))), 1)
x3_2 = self.conv_concat3(x3_2)
x = self.conv4(x3_2)
fea = x
x = self.conv5(x)
return x, fea
class CPD_ResNet(nn.Module):
# resnet based encoder decoder
def __init__(self, channel=32):
super(CPD_ResNet, self).__init__()
self.resnet = B2_ResNet()
self.rfb2_1 = RFB(512, channel)
self.rfb3_1 = RFB(1024, channel)
self.rfb4_1 = RFB(2048, channel)
self.agg1 = aggregation(channel)
self.rfb2_2 = RFB(512, channel)
self.rfb3_2 = RFB(1024, channel)
self.rfb4_2 = RFB(2048, channel)
self.agg2 = aggregation(channel)
self.upsample = nn.Upsample(scale_factor=8, mode='bilinear', align_corners=True)
self.HA = HA()
if self.training:
self.initialize_weights()
def forward(self, x):
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x1 = self.resnet.layer1(x) # 256 x 64 x 64
x2 = self.resnet.layer2(x1) # 512 x 32 x 32
x2_1 = x2
x3_1 = self.resnet.layer3_1(x2_1) # 1024 x 16 x 16
x4_1 = self.resnet.layer4_1(x3_1) # 2048 x 8 x 8
x2_1 = self.rfb2_1(x2_1)
x3_1 = self.rfb3_1(x3_1)
x4_1 = self.rfb4_1(x4_1)
attention_map,_ = self.agg1(x4_1, x3_1, x2_1)
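        # Added note: holistic attention gates the shared layer2 features with the initial
        # saliency map before they enter the refinement branch below.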
x2_2 = self.HA(attention_map.sigmoid(), x2)
x3_2 = self.resnet.layer3_2(x2_2) # 1024 x 16 x 16
x4_2 = self.resnet.layer4_2(x3_2) # 2048 x 8 x 8
x2_2 = self.rfb2_2(x2_2)
x3_2 = self.rfb3_2(x3_2)
x4_2 = self.rfb4_2(x4_2)
detection_map,fea = self.agg2(x4_2, x3_2, x2_2)
# return self.upsample(attention_map), self.upsample(detection_map)
return self.upsample(attention_map), self.upsample(detection_map), self.upsample(fea)
def initialize_weights(self):
res50 = models.resnet50(pretrained=True)
pretrained_dict = res50.state_dict()
all_params = {}
for k, v in self.resnet.state_dict().items():
if k in pretrained_dict.keys():
v = pretrained_dict[k]
all_params[k] = v
elif '_1' in k:
name = k.split('_1')[0] + k.split('_1')[1]
v = pretrained_dict[name]
all_params[k] = v
elif '_2' in k:
name = k.split('_2')[0] + k.split('_2')[1]
v = pretrained_dict[name]
all_params[k] = v
assert len(all_params.keys()) == len(self.resnet.state_dict().keys())
self.resnet.load_state_dict(all_params)
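# Hedged shape-check sketch (added for illustration, not part of the original file): it
# exercises only the RFB and aggregation blocks defined above with dummy tensors.
if __name__ == "__main__":
    rfb = RFB(512, 32)                       # reduces channels, keeps spatial size
    feats = torch.randn(1, 512, 32, 32)
    print(rfb(feats).shape)                  # torch.Size([1, 32, 32, 32])
    agg = aggregation(32)                    # fuses three pyramid levels
    x1 = torch.randn(1, 32, 8, 8)            # coarsest level
    x2 = torch.randn(1, 32, 16, 16)
    x3 = torch.randn(1, 32, 32, 32)          # finest level
    sal, fea = agg(x1, x2, x3)
    print(sal.shape, fea.shape)              # torch.Size([1, 1, 32, 32]) torch.Size([1, 96, 32, 32])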
|
py | b409ee2f71d21df4334996a63b44274c01843e62 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""
This module contains the classes required for dialogue management.
- DefaultDialogue: The dialogue class maintains state of a dialogue of type default and manages it.
- DefaultDialogues: The dialogues class keeps track of all dialogues of type default.
- OefSearchDialogue: The dialogue class maintains state of a dialogue of type oef_search and manages it.
- OefSearchDialogues: The dialogues class keeps track of all dialogues of type oef_search.
- TacDialogue: The dialogue class maintains state of a dialogue of type tac and manages it.
- TacDialogues: The dialogues class keeps track of all dialogues of type tac.
"""
from typing import Any
from aea.protocols.base import Address, Message
from aea.protocols.dialogue.base import Dialogue
from aea.skills.base import Model
from packages.fetchai.protocols.default.dialogues import (
DefaultDialogue as BaseDefaultDialogue,
)
from packages.fetchai.protocols.default.dialogues import (
DefaultDialogues as BaseDefaultDialogues,
)
from packages.fetchai.protocols.oef_search.dialogues import (
OefSearchDialogue as BaseOefSearchDialogue,
)
from packages.fetchai.protocols.oef_search.dialogues import (
OefSearchDialogues as BaseOefSearchDialogues,
)
from packages.fetchai.protocols.tac.dialogues import TacDialogue as BaseTacDialogue
from packages.fetchai.protocols.tac.dialogues import TacDialogues as BaseTacDialogues
DefaultDialogue = BaseDefaultDialogue
class DefaultDialogues(Model, BaseDefaultDialogues):
"""The dialogues class keeps track of all dialogues."""
def __init__(self, **kwargs: Any) -> None:
"""
Initialize dialogues.
:return: None
"""
Model.__init__(self, **kwargs)
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: Address
) -> Dialogue.Role:
"""Infer the role of the agent from an incoming/outgoing first message
:param message: an incoming/outgoing first message
:param receiver_address: the address of the receiving agent
:return: The role of the agent
"""
return DefaultDialogue.Role.AGENT
BaseDefaultDialogues.__init__(
self,
self_address=self.context.agent_address,
role_from_first_message=role_from_first_message,
)
OefSearchDialogue = BaseOefSearchDialogue
class OefSearchDialogues(Model, BaseOefSearchDialogues):
"""This class keeps track of all oef_search dialogues."""
def __init__(self, **kwargs: Any) -> None:
"""
Initialize dialogues.
:param agent_address: the address of the agent for whom dialogues are maintained
:return: None
"""
Model.__init__(self, **kwargs)
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: Address
) -> Dialogue.Role:
"""Infer the role of the agent from an incoming/outgoing first message
:param message: an incoming/outgoing first message
:param receiver_address: the address of the receiving agent
:return: The role of the agent
"""
return BaseOefSearchDialogue.Role.AGENT
BaseOefSearchDialogues.__init__(
self,
self_address=str(self.skill_id),
role_from_first_message=role_from_first_message,
)
TacDialogue = BaseTacDialogue
class TacDialogues(Model, BaseTacDialogues):
"""The dialogues class keeps track of all dialogues."""
def __init__(self, **kwargs: Any) -> None:
"""
Initialize dialogues.
:return: None
"""
Model.__init__(self, **kwargs)
def role_from_first_message( # pylint: disable=unused-argument
message: Message, receiver_address: Address
) -> Dialogue.Role:
"""Infer the role of the agent from an incoming/outgoing first message
:param message: an incoming/outgoing first message
:param receiver_address: the address of the receiving agent
:return: The role of the agent
"""
return TacDialogue.Role.CONTROLLER
BaseTacDialogues.__init__(
self,
self_address=self.context.agent_address,
role_from_first_message=role_from_first_message,
)
|
py | b409f0116a0de2ec8c063a2317f6fc7fd1c0f63f | import os
import logging
from functools import singledispatch
import crayons
import yaml
from jsonschema import ValidationError, validate, FormatChecker
from konverge.schema import PROXMOX_CLUSTER_SCHEMA, KUBE_CLUSTER_SCHEMA
class GenericConfigFile:
schema = None
filenames = tuple()
def __init__(self, filename: str = None):
self.filename = filename
self.config = self.read_yaml_file()
@property
def exists(self):
return os.path.exists(self.filename)
def read_yaml_file(self):
if not self.filename or not self.exists:
self.filename = self._get_default_filename()
return self._read_yaml_file_from_input()
def _get_default_filename(self):
for file in self.filenames:
if os.path.exists(file):
return file
logging.error(crayons.red(f'There is no file named: {self.filenames} in the current folder.'))
return None
def _read_yaml_file_from_input(self):
if not self.filename:
logging.error(crayons.red(f'Cannot read filename: {self.filename}'))
return None
try:
with open(self.filename, mode='r') as stream:
config = yaml.safe_load(stream)
except yaml.YAMLError as yaml_error:
logging.error(crayons.red(f'Error: failed to load from {self.filename}'))
logging.error(crayons.red(f'{yaml_error}'))
return
if not config:
logging.error(crayons.red(f'File {self.filename} is empty'))
return
return config
def validate(self):
if not self.schema:
return self.config
if not self.config:
logging.error(crayons.red(f'No serialized object generated from {self.filename}'))
return None
try:
validate(instance=self.config, schema=self.schema, format_checker=FormatChecker())
except ValidationError as validation_failed:
logging.error(crayons.red(f'{self.filename} invalid: {validation_failed}'))
return None
return self.config
def serialize(self):
if not self.config:
logging.error(crayons.red(f'No serialized object generated from {self.filename}'))
return None
validated = self.validate()
return ConfigSerializer(validated)
class ProxmoxClusterConfigFile(GenericConfigFile):
schema = PROXMOX_CLUSTER_SCHEMA
filenames = '.pve.yaml', '.pve.yml', '.proxmox.yaml', '.proxmox.yml'
class KubeClusterConfigFile(GenericConfigFile):
schema = KUBE_CLUSTER_SCHEMA
filenames = '.cluster.yaml', '.cluster.yml'
def serialize(self):
if not self.config:
logging.error(crayons.red(f'No serialized object generated from {self.filename}'))
return None
return self.validate()
class ConfigSerializer:
name: str
def __init__(self, config: dict):
self._serialize_values = singledispatch(self._serialize_values)
self._serialize_values.register(dict, self._serialize_values_dict)
self._serialize_values.register(list, self._serialize_values_list)
if config:
for key, value in config.items():
self._serialize_values(value, key)
def _serialize_values(self, value, key):
setattr(self, key, value)
def _serialize_values_dict(self, value, key):
setattr(self, key, ConfigSerializer(value))
def _serialize_values_list(self, value, key):
dispatch = lambda entry: ConfigSerializer(entry) if isinstance(entry, dict) else entry
setattr(self, key, [dispatch(unpacked) for unpacked in value])
def __repr__(self):
"""
Remove private and dunder methods from repr.
"""
attrs = str([attribute for attribute in dir(self) if '__' not in attribute and not attribute.startswith('_')])
return f'<{type(self).__name__}: {attrs}>'
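# Hedged usage sketch (added for illustration, not part of the original module): the sample
# dict below is made up; it just shows how ConfigSerializer exposes nested config as attributes.
if __name__ == "__main__":
    sample = {
        "name": "demo-cluster",
        "proxmox": {"host": "10.0.0.1", "nodes": [{"name": "pve1"}, {"name": "pve2"}]},
    }
    serialized = ConfigSerializer(sample)
    print(serialized.name)                   # demo-cluster
    print(serialized.proxmox.host)           # 10.0.0.1
    print(serialized.proxmox.nodes[1].name)  # pve2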
|
py | b409f0c3a908e156f3e01343a2a8acebd16c342e | import argparse
import os
import sys
from glob import glob
from os.path import basename, join, splitext
import librosa
import numpy as np
import pysinsy
import soundfile as sf
from nnmnkwii.io import hts
from nnsvs.io.hts import get_note_indices
def _is_silence(label):
is_full_context = "@" in label
if is_full_context:
is_silence = "-sil" in label or "-pau" in label
else:
is_silence = label == "sil" or label == "pau"
return is_silence
def remove_sil_and_pau(lab):
newlab = hts.HTSLabelFile()
for label in lab:
if "-sil" not in label[-1] and "-pau" not in label[-1]:
newlab.append(label, strict=False)
return newlab
def get_parser():
parser = argparse.ArgumentParser(
description="Data preparation for PJS",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("pjs_root", type=str, help="PJS song dir")
parser.add_argument("out_dir", type=str, help="Output directory")
parser.add_argument("--gain-normalize", action="store_true")
return parser
args = get_parser().parse_args(sys.argv[1:])
out_dir = args.out_dir
gain_normalize = args.gain_normalize
# Time-lag constraints to filter outliers
timelag_allowed_range = (-20, 19)
timelag_allowed_range_rest = (-40, 39)
offset_correction_threshold = 0.01
# Make aligned full context labels
full_align_dir = join(out_dir, "label_phone_align")
full_score_dir = join(out_dir, "label_phone_score")
for d in [full_align_dir, full_score_dir]:
os.makedirs(d, exist_ok=True)
sinsy = pysinsy.sinsy.Sinsy()
assert sinsy.setLanguages("j", "/usr/local/lib/sinsy/dic")
mono_lab_files = sorted(glob(join(args.pjs_root, "**/*.lab")))
muxicxml_files = sorted(glob(join(args.pjs_root, "**/*.musicxml")))
assert len(mono_lab_files) == len(muxicxml_files)
for mono_path, xml_path in zip(mono_lab_files, muxicxml_files):
align_mono_lab = hts.load(mono_path)
name = basename(mono_path)
assert sinsy.loadScoreFromMusicXML(xml_path)
    # check if sinsy's phoneme output is the same as the provided alignment format
sinsy_labels = sinsy.createLabelData(True, 1, 1).getData()
sinsy_mono_lab = hts.HTSLabelFile()
for label in sinsy_labels:
sinsy_mono_lab.append(label.split(), strict=False)
assert len(align_mono_lab) == len(sinsy_mono_lab)
assert (
np.asarray(align_mono_lab.contexts) == np.asarray(sinsy_mono_lab.contexts)
).all()
# rounding
has_too_short_ph = False
for idx in range(len(align_mono_lab)):
b, e = align_mono_lab.start_times[idx], align_mono_lab.end_times[idx]
bb, ee = round(b / 50000) * 50000, round(e / 50000) * 50000
# TODO: better way
if bb == ee:
# ensure minimum frame length 1
align_mono_lab.end_times[idx] = align_mono_lab.start_times[idx] + 50000
align_mono_lab.start_times[idx + 1] = align_mono_lab.end_times[idx]
print(align_mono_lab[idx - 1 : idx + 2])
has_too_short_ph = True
if has_too_short_ph:
sinsy.clearScore()
else:
# gen full-context
sinsy_labels = sinsy.createLabelData(False, 1, 1).getData()
align_full_lab = hts.HTSLabelFile()
score_full_lab = hts.HTSLabelFile()
for idx, label in enumerate(sinsy_labels):
b, e = align_mono_lab.start_times[idx], align_mono_lab.end_times[idx]
try:
align_full_lab.append((b, e, label.split()[-1]), strict=True)
except BaseException:
# TODO
import ipdb
ipdb.set_trace()
b, e, c = label.split()
b, e = round(int(b) / 50000) * 50000, round(int(e) / 50000) * 50000
assert b != e
score_full_lab.append((b, e, c), strict=False)
with open(join(full_score_dir, name), "w") as of:
of.write(str(score_full_lab))
with open(join(full_align_dir, name), "w") as of:
of.write(str(align_full_lab))
sinsy.clearScore()
# Prepare data for time-lag models
dst_dir = join(out_dir, "timelag")
lab_align_dst_dir = join(dst_dir, "label_phone_align")
lab_score_dst_dir = join(dst_dir, "label_phone_score")
for d in [lab_align_dst_dir, lab_score_dst_dir]:
os.makedirs(d, exist_ok=True)
print("Prepare data for time-lag models")
full_lab_align_files = sorted(glob(join(full_align_dir, "*.lab")))
full_lab_score_files = sorted(glob(join(full_score_dir, "*.lab")))
for lab_align_path, lab_score_path in zip(full_lab_align_files, full_lab_score_files):
name = basename(lab_align_path)
lab_align = hts.load(lab_align_path)
lab_score = hts.load(lab_score_path)
# this may harm for computing offset
lab_align = remove_sil_and_pau(lab_align)
lab_score = remove_sil_and_pau(lab_score)
    # Extract note onsets and compute an offset
note_indices = get_note_indices(lab_score)
onset_align = np.asarray(lab_align[note_indices].start_times)
onset_score = np.asarray(lab_score[note_indices].start_times)
global_offset = (onset_align - onset_score).mean()
global_offset = int(round(global_offset / 50000) * 50000)
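    # Note: label times are in HTS 100 ns units, so 50000 units == 5 ms (one frame);
    # the offset is snapped to this frame grid before being applied below.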
# Apply offset correction only when there is a big gap
apply_offset_correction = np.abs(global_offset * 1e-7) > offset_correction_threshold
if apply_offset_correction:
print(f"{name}: Global offset (in sec): {global_offset * 1e-7}")
lab_score.start_times = list(np.asarray(lab_score.start_times) + global_offset)
lab_score.end_times = list(np.asarray(lab_score.end_times) + global_offset)
onset_score += global_offset
# Exclude large diff parts (probably a bug of musicxml or alignment though)
valid_note_indices = []
for idx, (a, b) in enumerate(zip(onset_align, onset_score)):
note_idx = note_indices[idx]
lag = np.abs(a - b) / 50000
if _is_silence(lab_score.contexts[note_idx]):
if (
lag >= timelag_allowed_range_rest[0]
and lag <= timelag_allowed_range_rest[1]
):
valid_note_indices.append(note_idx)
else:
if lag >= timelag_allowed_range[0] and lag <= timelag_allowed_range[1]:
valid_note_indices.append(note_idx)
if len(valid_note_indices) < len(note_indices):
D = len(note_indices) - len(valid_note_indices)
print(f"{name}: {D}/{len(note_indices)} time-lags are excluded.")
# Note onsets as labels
lab_align = lab_align[valid_note_indices]
lab_score = lab_score[valid_note_indices]
# Save lab files
lab_align_dst_path = join(lab_align_dst_dir, name)
with open(lab_align_dst_path, "w") as of:
of.write(str(lab_align))
lab_score_dst_path = join(lab_score_dst_dir, name)
with open(lab_score_dst_path, "w") as of:
of.write(str(lab_score))
# Prepare data for duration models
dst_dir = join(out_dir, "duration")
lab_align_dst_dir = join(dst_dir, "label_phone_align")
for d in [lab_align_dst_dir]:
os.makedirs(d, exist_ok=True)
print("Prepare data for duration models")
full_lab_align_files = sorted(glob(join(full_align_dir, "*.lab")))
for lab_align_path in full_lab_align_files:
name = basename(lab_align_path)
lab_align = hts.load(lab_align_path)
# Save lab file
lab_align_dst_path = join(lab_align_dst_dir, name)
with open(lab_align_dst_path, "w") as of:
of.write(str(lab_align))
# Prepare data for acoustic models
dst_dir = join(out_dir, "acoustic")
wav_dst_dir = join(dst_dir, "wav")
lab_align_dst_dir = join(dst_dir, "label_phone_align")
lab_score_dst_dir = join(dst_dir, "label_phone_score")
for d in [wav_dst_dir, lab_align_dst_dir, lab_score_dst_dir]:
os.makedirs(d, exist_ok=True)
print("Prepare data for acoustic models")
full_lab_align_files = sorted(glob(join(full_align_dir, "*.lab")))
full_lab_score_files = sorted(glob(join(full_score_dir, "*.lab")))
for lab_align_path, lab_score_path in zip(full_lab_align_files, full_lab_score_files):
name = splitext(basename(lab_align_path))[0]
wav_path = join(args.pjs_root, name, f"{name}_song.wav")
assert wav_path
# sr, wave = wavfile.read(wav_path)
wav, sr = librosa.load(wav_path, sr=48000)
if gain_normalize:
wav = wav / wav.max() * 0.99
lab_align = hts.load(lab_align_path)
lab_score = hts.load(lab_score_path)
    # Save audio
wav_dst_path = join(wav_dst_dir, name + ".wav")
# TODO: consider explicit subtype
sf.write(wav_dst_path, wav, sr)
# Save label
lab_align_dst_path = join(lab_align_dst_dir, name + ".lab")
with open(lab_align_dst_path, "w") as of:
of.write(str(lab_align))
lab_score_dst_path = join(lab_score_dst_dir, name + ".lab")
with open(lab_score_dst_path, "w") as of:
of.write(str(lab_score))
sys.exit(0)
|
py | b409f105a61c29023b67eb3854ab09e480005f65 |
from __future__ import absolute_import, division, print_function
import libtbx.load_env
from libtbx.utils import multi_out
from optparse import OptionParser
from six.moves import cStringIO as StringIO
import os.path as op
import os
import sys
# XXX these will be skipped (for various reasons)
ignore_modules = set([
"libtbx.crossmingw",
"libtbx.start_print_trace", # produces lots of output
"libtbx.command_line.epydoc_run",
"libtbx.command_line.ipython_shell_start",
"mmtbx.pdb_distances", # too much executed code
"gltbx.wx_viewer_leapmotion", # crashes if Leap installed
# non-CCTBX modules
"PyQuante.ThomasFermi", # uses reserved keyword 'with'
"elbow.example_script",
"phenix_regression",
"phenix_dev",
"phenix.autosol.bayes_5", # too much executed code
])
def has_init_py (path_name) :
for file_name in os.listdir(path_name) :
if (file_name == "__init__.py") :
return True
return False
def split_all (path_name) :
paths = []
while True :
leading_path, dir_name = op.split(path_name)
if (dir_name != '') :
paths.append(dir_name)
path_name = leading_path
else :
break
paths.reverse()
return paths
def run (args) :
verbose = False
parser = OptionParser()
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="Turn on verbose output")
parser.add_option("--skip-tests", dest="skip_tests", action="store_true",
help="Don't import modules beginning with 'tst'")
options, args = parser.parse_args(args)
module_list = []
if (len(args) == 0) :
module_list.extend([ m.name for m in libtbx.env.module_list ])
else :
for arg in args :
assert (arg in libtbx.env.module_dict), arg
module_list.append(arg)
has_stdout = []
stdout_old = sys.stdout
for module_name in module_list :
if (module_name in ignore_modules) :
continue
try :
module = __import__(module_name)
except ImportError as e:
print(e, file=sys.stderr)
continue
assert len(module.__path__) == 1
mod_path = module.__path__[0]
path_fields = split_all(mod_path)
n_leading_dirs = len(path_fields) - 1
for dirname, dirnames, filenames in os.walk(mod_path) :
for file_name in filenames :
if file_name.endswith(".py") and (file_name != "libtbx_refresh.py") :
py_mod_name, ext = op.splitext(file_name)
if (ext != '.py') or ("." in py_mod_name) :
if (options.verbose) :
print("skipping %s" % file_name, file=sys.stderr)
continue
py_path = split_all(dirname)[n_leading_dirs:]
import_name = ".".join(py_path)
if (not has_init_py(dirname)) or (import_name in ignore_modules) :
continue
top_level_module = py_path[0]
if (file_name != "__init__.py") :
import_name += "." + file_name[:-3]
if (import_name in ignore_modules) :
continue
elif ((file_name.startswith("tst_") or file_name.startswith("test_"))
and options.skip_tests) :
continue
if (options.verbose) :
print(import_name)
try :
sys.stdout = multi_out()
sys.stdout.register("stdout", stdout_old)
out = StringIO()
sys.stdout.register("stringio", out)
submodule = __import__(import_name)
if (out.getvalue() != '') :
has_stdout.append(import_name)
if (options.verbose) :
print(out.getvalue(), file=sys.stderr)
except ImportError as e :
print(e, file=sys.stderr)
finally :
sys.stdout = stdout_old
print("")
print("*" * 80)
print("ALL MODULES IMPORTED SUCCESSFULLY")
print("*" * 80)
print("")
if (len(has_stdout) > 0) :
print("Warning: %d modules print to stdout on import" % \
len(has_stdout), file=sys.stderr)
for import_name in has_stdout :
print(import_name, file=sys.stderr)
if (__name__ == "__main__") :
run(sys.argv[1:])
|
py | b409f31ef3331a6db07939d51feca311bdeb18e4 | """
Django settings for example project.
Generated by 'django-admin startproject' using Django 2.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@gu=--r-9y#a3pao$6^x49m7z6y-_wdub*e(%xnnj0=f1qti%!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'modelaudit',
'examples',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'modelaudit.middleware.CurrentUserMiddleware',
]
ROOT_URLCONF = 'example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
py | b409f346dfd869106ac110ad3534b49475331fc7 | import datetime
from pymongo import MongoClient
import twitter
import urllib.parse
# loading twitter tokens from a file
def load_tokens(path):
with open(path, 'r') as f:
# f = file(path, 'r')
for line in f.readlines():
if line.startswith("#"):
continue
parts = [x.strip() for x in line.split(",")]
(consumer_key, consumer_secret, auth_key, auth_secret) = parts
tokens = dict()
tokens["CLIENT_KEY"] = consumer_key
tokens["CLIENT_SECRET"] = consumer_secret
tokens["ATOKEN_KEY"] = auth_key
tokens["ATOKEN_SECRET"] = auth_secret
break # assume first token line needed
return tokens
# downloading new tweets from user's timeline - in blocks of up to 200
def get_user_tweets(api, user, s_id, m_id):
end = False
try:
statuses = api.GetUserTimeline(screen_name=user, since_id=s_id, max_id=m_id, count=200)
if len(statuses) == 0:
end = True
else:
print('\t%s has %i new tweets' % (user, len(statuses)))
return end,statuses
except twitter.error.TwitterError as err:
print(err.message[0]['message'])
exit()
# downloading replies to a user (the Twitter API does not allow downloading replies to a specific tweet) - in blocks of up to 200
def get_replies_to_user(api, user, s_id, m_id):
end = False
try:
if m_id is not None:
q = urllib.parse.urlencode({"q": "to:%s since_id:%s max_id:%s " % (user, s_id, m_id)})
else:
q = urllib.parse.urlencode({"q": "to:%s since_id:%s" % (user, s_id)})
print(q, s_id, m_id)
statuses = api.GetSearch(raw_query=q, since_id=s_id, max_id=m_id, count=200)
if len(statuses) == 0:
end = True
else:
print('\t%s has %i new replies' % (user, len(statuses)))
return end,statuses
except twitter.error.TwitterError as err:
print(err.message[0]['message'])
exit()
# loads user screen names from a file, retrieves last user's tweet id and last reply to user's tweet available in database
def load_users(user_file, tweetsDB):
user_id2last_tweet_id = {}
user_id2last_reply_id = {}
with open(user_file, 'r') as f:
for line in f:
spl = line.strip().split("\t")
user_id = spl[0]
user_id2last_tweet_id[user_id] = "1"
user_id2last_reply_id[user_id] = "1"
try:
last_tweet = tweetsDB.find({"user.screen_name": user_id}).sort("created_at_mongo", -1).limit(1)
if last_tweet.count(True) == 1:
user_id2last_tweet_id[user_id] = last_tweet[0]['id_str']
last_reply = tweetsDB.find({"in_reply_to_screen_name":user_id}).sort("created_at_mongo", -1).limit(1)
if last_reply.count(True) == 1:
user_id2last_reply_id[user_id] = last_reply[0]['id_str']
except Exception as ex:
print (ex)
print ("Unable to find last downloaded tweet id or reply id for user", user_id)
return user_id2last_tweet_id, user_id2last_reply_id
# downloading tweets for a user - starting with tweet with a given id
def getAllTweetsForUser(api, user_id, newest_sid):
end, tweets = get_user_tweets(api, user_id, newest_sid, None)
tlist = tweets
oldest_current_id = "1"
if len(tweets)>0:
oldest_current_id = tweets[len(tweets) - 1].id
if not end:
while int(oldest_current_id) > int(newest_sid):
end, tweets = get_user_tweets(api, user_id, newest_sid, int(oldest_current_id) - 1)
if end:
break
print(len(tweets), len(tlist))
oldest_current_id = tweets[len(tweets) - 1].id
tlist += tweets
print(len(tlist))
return tlist
# downloading replies for a given user - starting with tweet with a given id
def getAllRepliesForUser(api, user_id, newest_sid):
print("doing replies now")
end, replies = get_replies_to_user(api, user_id, newest_sid, None)
rlist = replies
oldest_reply_id = "1"
if len(replies)>0:
oldest_reply_id = replies[len(replies) - 1].id
print(len(rlist))
if not end:
while int(oldest_reply_id) > int(newest_sid):
end, replies = get_replies_to_user(api, user_id, newest_sid, int(oldest_reply_id) - 1)
if end:
break
oldest_reply_id = replies[len(replies) - 1].id
rlist += replies
print(len(rlist))
return rlist
# saving downloaded tweets in mongo database
def saveTweets(tlist, tweetsDB, not_saved_in_db, exceptions):
    tlist.reverse()
    print(len(tlist))
    for tweet in tlist:
        try:
            created_at = tweet.created_at
            dt = datetime.datetime.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y')
            json_tweet = tweet.AsDict()
            json_tweet['created_at_mongo'] = dt
            tweetsDB.insert_one(json_tweet)
        except Exception as ex:
            not_saved_in_db += 1
            exceptions.append(ex)
    # integers are passed by value, so return the updated counter to the caller
    return not_saved_in_db
def main():
# change in case your file with tokens has a different name
tokens = load_tokens("twittertokens.txt")
api = twitter.Api(
consumer_key=tokens["CLIENT_KEY"],
consumer_secret=tokens["CLIENT_SECRET"],
access_token_key=tokens["ATOKEN_KEY"],
access_token_secret=tokens["ATOKEN_SECRET"],
sleep_on_rate_limit=True
)
client = MongoClient()
# rename database and collection names in line below
tweetsDB = client["test"]["test"]
# change in case your file with user screen names has a different name
user_id2last_tweet_id, user_id2last_reply_id = load_users("user_list.txt", tweetsDB)
start = datetime.datetime.now()
print(start, "Starting collecting tweets")
i = 1
total_new = 0
not_saved_in_db = 0
exceptions = []
for user_id in user_id2last_tweet_id:
print("User", i, "out of", len(user_id2last_tweet_id), ". User id:", user_id)
i += 1
newest_saved_sid = user_id2last_tweet_id[user_id]
newest_saved_reply_sid = user_id2last_reply_id[user_id]
print("newest post", newest_saved_sid)
print("newest reply", newest_saved_reply_sid)
tlist = getAllTweetsForUser(api, user_id, newest_saved_sid)
rlist = getAllRepliesForUser(api, user_id, newest_saved_reply_sid)
        # process the tweets we got in reverse order so that we maintain the order of timestamps
        not_saved_in_db = saveTweets(tlist, tweetsDB, not_saved_in_db, exceptions)
        not_saved_in_db = saveTweets(rlist, tweetsDB, not_saved_in_db, exceptions)
total_new += len(tlist)
total_new += len(rlist)
end = datetime.datetime.now()
print(end, "Done collecting", total_new, "tweets.")
print("Took:", end - start)
print(not_saved_in_db, "not saved in db:")
for ex in exceptions:
print(ex)
main()
|
py | b409f44c65e2ce291c9b6f66de243bf4136e2d6f | from django.contrib import admin
from .models import Shop, ShopType, ShopBranch, Country, UserBranch, Cusine
admin.site.register(Shop)
admin.site.register(ShopType)
admin.site.register(ShopBranch)
admin.site.register(Country)
admin.site.register(UserBranch)
admin.site.register(Cusine)
|
py | b409f57e3b41235379db7dc7f280af840a45f8ed | import logging
import os
import random
from tqdm import tqdm
import numpy as np
import torch
from datetime import date
from torch.utils.data import DataLoader
from transformers import AutoConfig, AutoTokenizer
from models.bert_retriever import BERTRetriever
from data_classes.dr_datasets import RetrievalDataset, retrieval_collate
from utils.torch_utils import move_to_cuda, AverageMeter, load_saved
from config import train_args
from criterions import loss_dense
from torch.optim import Adam
from functools import partial
# import apex
def main():
args = train_args()
# args.do_train = True
# args.prefix = "XLMR_testing"
# args.predict_batch_size = 100
# args.model_name = "xlm-roberta-base"
# args.model_name = "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext"
# args.train_batch_size = 5
# args.learning_rate = 2e-5
# args.fp16 = True
# args.train_file = "COUGH/retrieval_train.txt"
# args.predict_file = "COUGH/retrieval_dev.txt"
# args.seed = 16
# args.eval_period = 300
# args.max_c_len = 300
# args.max_q_len = 30
# args.warmup_ratio = 0.1
# args.num_train_epochs = 20
# args.output_dir = "XLMR_retriever"
# if args.fp16:
# apex.amp.register_half_function(torch, 'einsum')
date_curr = date.today().strftime("%m-%d-%Y")
model_name = f"{args.prefix}-seed{args.seed}-bsz{args.train_batch_size}-fp16{args.fp16}-lr{args.learning_rate}-{args.model_name}"
args.output_dir = os.path.join(args.output_dir, date_curr, model_name)
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
print(f"output directory {args.output_dir} already exists and is not empty.")
os.makedirs(args.output_dir, exist_ok=True)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
handlers=[logging.FileHandler(os.path.join(args.output_dir, "log.txt")), logging.StreamHandler()],
)
logger = logging.getLogger(__name__)
logger.info(args)
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend="nccl")
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
args.train_batch_size = int(args.train_batch_size / args.accumulate_gradients)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
model_config = AutoConfig.from_pretrained(args.model_name)
model = BERTRetriever(args.model_name)
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
collate_fc = partial(retrieval_collate, pad_id=tokenizer.pad_token_id, model_name=args.model_name)
if args.do_train and args.max_c_len > model_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" % (args.max_c_len, model_config.max_position_embeddings)
)
eval_dataset = RetrievalDataset(tokenizer, args.predict_file, args)
eval_dataloader = DataLoader(
eval_dataset,
batch_size=args.predict_batch_size,
collate_fn=collate_fc,
pin_memory=True,
num_workers=args.num_workers,
)
logger.info(f"Num of dev batches: {len(eval_dataloader)}")
if args.init_checkpoint != "":
logger.info("Loading best checkpoint")
model = load_saved(model, args.init_checkpoint)
model.to(device)
logger.info(f"number of trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")
if args.do_train:
sparse_params = [p for n, p in model.named_parameters() if "qz_loga" in n]
optimizer_parameters = [
{"params": sparse_params, "lr": args.learning_rate},
{"params": [p for n, p in model.named_parameters() if "qz_loga" not in n]},
]
optimizer = Adam(optimizer_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
if args.fp16:
scaler = torch.cuda.amp.GradScaler()
# model, optimizer = apex.amp.initialize(
# model, optimizer, opt_level=args.fp16_opt_level)
# else:
# if args.fp16:
# model = apex.amp.initialize(model, opt_level=args.fp16_opt_level)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
if args.do_train:
global_step = 0 # gradient update step
batch_step = 0 # forward batch count
best_mrr = 0
train_loss_meter = AverageMeter()
model.train()
train_dataset = RetrievalDataset(tokenizer, args.train_file, args)
train_dataloader = DataLoader(
train_dataset,
batch_size=args.train_batch_size,
pin_memory=True,
collate_fn=collate_fc,
num_workers=args.num_workers,
shuffle=True,
)
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
logger.info("Start training....")
for epoch in range(int(args.num_train_epochs)):
for batch in tqdm(train_dataloader):
batch_step += 1
batch = move_to_cuda(batch)
if args.fp16:
with torch.cuda.amp.autocast():
losses = loss_dense(model, batch)
loss = losses["retr_loss"]
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
scaler.scale(loss).backward()
# with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
# scaled_loss.backward()
else:
loss.backward()
train_loss_meter.update(loss.item())
if (batch_step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scaler.step(optimizer)
scaler.update()
# torch.nn.utils.clip_grad_norm_(
# apex.amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
# scheduler.step()
# model.zero_grad()
optimizer.zero_grad()
global_step += 1
if args.eval_period != -1 and global_step % args.eval_period == 0:
mrr = predict(args, model, eval_dataloader, device, logger, tokenizer)
logger.info(
"Step %d Train loss %.2f MRR %.2f on epoch=%d"
% (global_step, train_loss_meter.avg, mrr * 100, epoch)
)
if best_mrr < mrr:
logger.info(
"Saving model with best MRR %.2f -> MRR %.2f on epoch=%d"
% (best_mrr * 100, mrr * 100, epoch)
)
torch.save(model.state_dict(), os.path.join(args.output_dir, f"checkpoint_best.pt"))
model = model.to(device)
best_mrr = mrr
mrr = predict(args, model, eval_dataloader, device, logger, tokenizer)
logger.info(
"Step %d Train loss %.2f MRR %.2f on epoch=%d" % (global_step, train_loss_meter.avg, mrr * 100, epoch)
)
if best_mrr < mrr:
torch.save(model.state_dict(), os.path.join(args.output_dir, f"checkpoint_last.pt"))
logger.info(
"Saving model with best MRR %.2f -> MRR %.2f on epoch=%d" % (best_mrr * 100, mrr * 100, epoch)
)
torch.save(model.state_dict(), os.path.join(args.output_dir, f"checkpoint_best.pt"))
model = model.to(device)
best_mrr = mrr
logger.info("Training finished!")
elif args.do_predict:
mrr = predict(args, model, eval_dataloader, device, logger, tokenizer)
logger.info(f"test performance {mrr}")
def predict(args, model, eval_dataloader, device, logger, tokenizer):
model.eval()
num_correct, num_total, rrs = 0, 0, [] # reciprocal rank
f1s, ems = [], [] # augmentation accuracy
sparse_ratio_q = []
sparse_ratio_c = []
if args.quantization:
num_correct_quant, num_total_quant, rrs_quant = 0, 0, []
def cal_metric(q, c, neg_c):
product_in_batch = torch.mm(q, c.t())
product_neg = (q * neg_c).sum(-1).unsqueeze(1)
product = torch.cat([product_in_batch, product_neg], dim=-1)
target = torch.arange(product.size(0)).to(product.device)
ranked = product.argsort(dim=1, descending=True)
prediction = product.argmax(-1)
# MRR
batch_rrs = []
idx2rank = ranked.argsort(dim=1)
for idx, t in enumerate(target.tolist()):
batch_rrs.append(1 / (idx2rank[idx][t].item() + 1))
pred_res = prediction == target
batch_total = pred_res.size(0)
batch_correct = pred_res.sum(0)
return {"batch_rrs": batch_rrs, "batch_total": batch_total, "batch_correct": batch_correct}
for batch in tqdm(eval_dataloader):
batch = move_to_cuda(batch)
with torch.no_grad():
outputs = model(batch)
q, c, neg_c = outputs["q"], outputs["c"], outputs["neg_c"]
# calculate the sparsity
sparse_ratio_q += (torch.count_nonzero(q, dim=1) / q.size(1)).tolist()
sparse_ratio_c += (torch.count_nonzero(c, dim=1) / c.size(1)).tolist()
batch_metrics = cal_metric(q, c, neg_c)
rrs += batch_metrics["batch_rrs"]
num_correct += batch_metrics["batch_correct"]
num_total += batch_metrics["batch_total"]
acc = num_correct / num_total
mrr = np.mean(rrs)
logger.info(f"evaluated {num_total} examples...")
logger.info(f"avg. Acc: {acc:.3f}")
logger.info(f"avg. MRR: {mrr:.3f}")
logger.info(f"avg sparsity question: {np.mean(sparse_ratio_q)}, {len(sparse_ratio_q)}")
logger.info(f"avg sparsity context: {np.mean(sparse_ratio_c)}, {len(sparse_ratio_c)}")
model.train()
return mrr
if __name__ == "__main__":
main()
|
py | b409f5fd81d904a68ce749a29506de5421373a1a | from develops_today_test.settings import *
DEBUG = False
ALLOWED_HOSTS = ["*"]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
|
py | b409f85a4aa30c50bc2419c3ff6899d54a7d7851 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.utils import try_import
from ..albert.tokenizer import AlbertEnglishTokenizer
import warnings
__all__ = ['T5Tokenizer', ]
class T5Tokenizer(AlbertEnglishTokenizer):
"""
    Constructs a T5 tokenizer based on SentencePiece.
This tokenizer inherits from :class:`~paddlenlp.transformers.tokenizer_utils.PretrainedTokenizer`
which contains most of the main methods. For more information regarding those methods,
please refer to this superclass.
Args:
sentencepiece_model_file (str):
The vocabulary file (ends with '.spm') required to instantiate
a `SentencePiece <https://github.com/google/sentencepiece>`__ tokenizer.
do_lower_case (bool):
Whether or not to lowercase the input when tokenizing. Defaults to `False`.
remove_space (bool):
            Whether or not to remove space when tokenizing. Defaults to `True`.
        keep_accents (bool):
            Whether or not to keep accents when tokenizing. Defaults to `False`.
eos_token (str):
A special token representing the *eos (end-of-sentence)* token.
Defaults to "</s>".
unk_token (str):
A special token representing the *unknown (out-of-vocabulary)* token.
            An unknown token is set to be `unk_token` in order to be converted to an ID.
Defaults to "<unk>".
pad_token (str):
A special token used to make arrays of tokens the same size for batching purposes.
Defaults to "<pad>".
"""
resource_files_names = {"sentencepiece_model_file": "spiece.model"}
pretrained_resource_files_map = {
"sentencepiece_model_file": {
"t5-small":
"https://paddlenlp.bj.bcebos.com/models/transformers/t5/t5-small/spiece.model",
"t5-base":
"https://paddlenlp.bj.bcebos.com/models/transformers/t5/t5-base/spiece.model",
"t5-large":
"https://paddlenlp.bj.bcebos.com/models/transformers/t5/t5-large/spiece.model",
},
}
pretrained_init_configuration = {
"t5-small": {
"do_lower_case": False
},
"t5-base": {
"do_lower_case": False
},
"t5-large": {
"do_lower_case": False
},
}
def __init__(self,
sentencepiece_model_file,
do_lower_case=False,
remove_space=True,
keep_accents=False,
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
**kwargs):
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.sentencepiece_model_file = sentencepiece_model_file
spm = try_import("sentencepiece")
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(sentencepiece_model_file)
def __call__(self,
text,
text_pair=None,
max_seq_len=None,
stride=0,
is_split_into_words=False,
pad_to_max_seq_len=False,
truncation_strategy="longest_first",
return_position_ids=False,
return_token_type_ids=False,
return_attention_mask=True,
return_length=False,
return_overflowing_tokens=False,
return_special_tokens_mask=False):
return super(T5Tokenizer, self).__call__(
text, text_pair, max_seq_len, stride, is_split_into_words,
pad_to_max_seq_len, truncation_strategy, return_position_ids,
return_token_type_ids, return_attention_mask, return_length,
return_overflowing_tokens, return_special_tokens_mask)
def _add_eos_if_not_present(self, token_ids):
"""Do not add eos again if user already added it."""
if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added."
)
return token_ids
else:
return token_ids + [self.eos_token_id]
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1):
"""
Build model inputs from a sequence or a pair of sequence.
        A T5 sequence has the following format:
- single sequence: ``X </s>``
- pair of sequences: ``A </s> B </s>``
Args:
token_ids_0 (List[int]):
List of IDs to which the special tokens will be added.
token_ids_1 (List[int], optional):
Optional second list of IDs for sequence pairs. Defaults to None.
Returns:
List[int]: List of input_id with the appropriate special tokens.
"""
token_ids_0 = self._add_eos_if_not_present(token_ids_0)
if token_ids_1 is None:
return token_ids_0
else:
token_ids_1 = self._add_eos_if_not_present(token_ids_1)
return token_ids_0 + token_ids_1
def create_token_type_ids_from_sequences(self,
token_ids_0,
token_ids_1=None):
"""
Create a mask from the two sequences.
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (List[int]):
List of IDs.
token_ids_1 (List[int], optional):
Optional second list of IDs for sequence pairs.
Returns:
List[int]: List of token_type_id according to the given sequence(s).
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
def get_special_tokens_mask(self,
token_ids_0,
token_ids_1=None,
already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``encode`` methods.
Args:
token_ids_0 (List[int]): List of ids of the first sequence.
token_ids_1 (List[int], optional): List of ids of the second sequence.
already_has_special_tokens (bool, optional): Whether or not the token list is already
                formatted with special tokens for the model. Defaults to `False`.
Returns:
List[int]: The list of integers in the range [0, 1]:
1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0,
token_ids_1=token_ids_1,
already_has_special_tokens=True, )
# normal case: some special tokens
if token_ids_1 is None:
return ([0] * len(token_ids_0)) + [1]
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
current_sub_tokens = []
out_string = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += (self.sp_model.decode_pieces(current_sub_tokens) +
token + " ")
current_sub_tokens = []
else:
current_sub_tokens.append(token)
out_string += self.sp_model.decode_pieces(current_sub_tokens)
return out_string.strip()
def decode(self,
token_ids,
skip_special_tokens=False,
clean_up_tokenization_spaces=True):
"""
Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
Args:
token_ids (Union[List[int], Tensor]):
List of tokenized input ids.
skip_special_tokens (bool, optional):
Whether or not to remove special tokens in the decoding. Defaults to `False`.
clean_up_tokenization_spaces (bool, optional):
Whether or not to clean up the tokenization spaces. Defaults to `True`.
Returns:
str: The decoded sentence.
"""
if hasattr(token_ids, "tolist"):
token_ids = token_ids.tolist()
text = self.convert_tokens_to_string(
self.convert_ids_to_tokens(
token_ids, skip_special_tokens=skip_special_tokens))
if clean_up_tokenization_spaces:
text = self.clean_up_tokenization(text)
return text
def batch_decode(self,
sequences,
skip_special_tokens=False,
clean_up_tokenization_spaces=True):
"""
Convert a list of lists of token ids into a list of strings by calling decode.
Args:
sequences (Union[List[int], List[List[int]], Tensor]):
List of tokenized input ids.
skip_special_tokens (bool, optional):
Whether or not to remove special tokens in the decoding. Defaults to `False`.
clean_up_tokenization_spaces (bool, optional):
Whether or not to clean up the tokenization spaces. Defaults to `True`.
Returns:
List[str]: The list of decoded sentences.
"""
return [
self.decode(
seq,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces)
for seq in sequences
]
@staticmethod
def clean_up_tokenization(out_string):
"""
Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.
Args:
out_string (str): The text to clean up.
Returns:
str: The cleaned-up string.
"""
out_string = (out_string.replace(" .", ".").replace(" ?", "?")
.replace(" !", "!").replace(" ,", ",").replace(" ' ", "'")
.replace(" n't", "n't").replace(" 'm", "'m")
.replace(" 's", "'s").replace(" 've", "'ve")
.replace(" 're", "'re"))
return out_string
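# Hedged usage sketch (added for illustration; not part of the original module). It assumes a
# local SentencePiece vocabulary file named "spiece.model" is available; the file name and the
# sample sentence below are placeholders.
if __name__ == "__main__":
    sketch_tokenizer = T5Tokenizer("spiece.model")
    # __call__ (inherited behaviour plus the overrides above) returns a dict of encoded fields.
    encoded = sketch_tokenizer("translate English to German: Hello world")
    # decode() maps the ids back to text and, by default, cleans up tokenization spaces.
    print(sketch_tokenizer.decode(encoded["input_ids"]))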
|
py | b409f8dadea0670d37ee28c7c0c70567efb46f42 | """
Simple Object Panel
+++++++++++++++++++
This panel has a :class:`Panel.poll` and :class:`Panel.draw_header` function,
even though the contents is basic this closely resembles blenders panels.
"""
import bpy
class ObjectSelectPanel(bpy.types.Panel):
bl_idname = "OBJECT_PT_select"
bl_label = "Select"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "object"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
return (context.object is not None)
def draw_header(self, context):
layout = self.layout
obj = context.object
layout.prop(obj, "select", text="")
def draw(self, context):
layout = self.layout
obj = context.object
row = layout.row()
row.prop(obj, "hide_select")
row.prop(obj, "hide_render")
box = layout.box()
box.label(text="Selection Tools")
box.operator("object.select_all").action = 'TOGGLE'
row = box.row()
row.operator("object.select_all").action = 'INVERT'
row.operator("object.select_random")
bpy.utils.register_class(ObjectSelectPanel)
|
py | b409f929916d785bb1c629d44052156d263a59ba | from model.contact import Contact
from random import randrange
def test_delete_some_contact(app):
if app.contact.count()==0:
app.contact.create(Contact(first_name="test"))
old_contacts = app.contact.get_contact_list()
index=randrange(len(old_contacts))
app.contact.delete_contact_by_index(index)
new_contacts = app.contact.get_contact_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts[index:index+1]=[]
assert old_contacts == new_contacts
|
py | b409f95cab30e0b40f52a58dd0261e931e6b3acd | import os
import sys
lib_dir = os.path.join(os.path.dirname(__file__), "lib")
src_dir = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(lib_dir)
sys.path.append(src_dir)
import src.Wav2Vec2FBX.__main__ # noqa
|
py | b409fb7953ddbda9ba3e8a5d7c0f583931ac3613 | import copy
import logging
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Dict, Iterable, Optional, Tuple, Union
import pandas as pd
from ruamel.yaml import YAML
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import BatchMarkers, BatchSpec
from great_expectations.expectations.registry import get_metric_provider
from great_expectations.util import filter_properties_dict
from great_expectations.validator.validation_graph import MetricConfiguration
logger = logging.getLogger(__name__)
yaml = YAML()
yaml.default_flow_style = False
class NoOpDict:
def __getitem__(self, item):
return None
def __setitem__(self, key, value):
return None
def update(self, value):
return None
class BatchData:
def __init__(self, execution_engine):
self._execution_engine = execution_engine
@property
def execution_engine(self):
return self._execution_engine
def head(self, *args, **kwargs):
# CONFLICT ON PURPOSE. REMOVE.
return pd.DataFrame({})
class MetricFunctionTypes(Enum):
VALUE = "value"
MAP_VALUES = "value" # "map_values"
WINDOW_VALUES = "value" # "window_values"
AGGREGATE_VALUE = "value" # "aggregate_value"
class MetricDomainTypes(Enum):
COLUMN = "column"
COLUMN_PAIR = "column_pair"
MULTICOLUMN = "multicolumn"
TABLE = "table"
class ExecutionEngine(ABC):
recognized_batch_spec_defaults = set()
def __init__(
self,
name=None,
caching=True,
batch_spec_defaults=None,
batch_data_dict=None,
validator=None,
):
self.name = name
self._validator = validator
# NOTE: using caching makes the strong assumption that the user will not modify the core data store
# (e.g. self.spark_df) over the lifetime of the dataset instance
self._caching = caching
# NOTE: 20200918 - this is a naive cache; update.
if self._caching:
self._metric_cache = {}
else:
self._metric_cache = NoOpDict()
if batch_spec_defaults is None:
batch_spec_defaults = {}
batch_spec_defaults_keys = set(batch_spec_defaults.keys())
if not batch_spec_defaults_keys <= self.recognized_batch_spec_defaults:
logger.warning(
"Unrecognized batch_spec_default(s): %s"
% str(batch_spec_defaults_keys - self.recognized_batch_spec_defaults)
)
self._batch_spec_defaults = {
key: value
for key, value in batch_spec_defaults.items()
if key in self.recognized_batch_spec_defaults
}
self._batch_data_dict = {}
if batch_data_dict is None:
batch_data_dict = {}
self._active_batch_data_id = None
self._load_batch_data_from_dict(batch_data_dict)
# Gather the call arguments of the present function (and add the "class_name"), filter out the Falsy values, and
# set the instance "_config" variable equal to the resulting dictionary.
self._config = {
"name": name,
"caching": caching,
"batch_spec_defaults": batch_spec_defaults,
"batch_data_dict": batch_data_dict,
"validator": validator,
"module_name": self.__class__.__module__,
"class_name": self.__class__.__name__,
}
filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True)
def configure_validator(self, validator):
"""Optionally configure the validator as appropriate for the execution engine."""
pass
@property
def active_batch_data_id(self):
"""The batch id for the default batch data.
When an execution engine is asked to process a compute domain that does
not include a specific batch_id, then the data associated with the
active_batch_data_id will be used as the default.
"""
if self._active_batch_data_id is not None:
return self._active_batch_data_id
elif len(self.loaded_batch_data_dict) == 1:
return list(self.loaded_batch_data_dict.keys())[0]
else:
return None
@active_batch_data_id.setter
def active_batch_data_id(self, batch_id):
if batch_id in self.loaded_batch_data_dict.keys():
self._active_batch_data_id = batch_id
else:
raise ge_exceptions.ExecutionEngineError(
f"Unable to set active_batch_data_id to {batch_id}. The may data may not be loaded."
)
@property
def active_batch_data(self):
"""The data from the currently-active batch."""
if self.active_batch_data_id is None:
return None
else:
return self.loaded_batch_data_dict.get(self.active_batch_data_id)
@property
def loaded_batch_data_dict(self):
"""The current dictionary of batches."""
return self._batch_data_dict
@property
def loaded_batch_data_ids(self):
return list(self.loaded_batch_data_dict.keys())
@property
def config(self) -> dict:
return self._config
@property
def dialect(self):
return None
def get_batch_data(
self,
batch_spec: BatchSpec,
) -> Any:
"""Interprets batch_data and returns the appropriate data.
This method is primarily useful for utility cases (e.g. testing) where
data is being fetched without a DataConnector and metadata like
batch_markers is unwanted
Note: this method is currently a thin wrapper for get_batch_data_and_markers.
It simply suppresses the batch_markers.
"""
batch_data, _ = self.get_batch_data_and_markers(batch_spec)
return batch_data
@abstractmethod
def get_batch_data_and_markers(self, batch_spec) -> Tuple[BatchData, BatchMarkers]:
raise NotImplementedError
def load_batch_data(self, batch_id: str, batch_data: Any) -> None:
"""
Loads the specified batch_data into the execution engine
"""
self._batch_data_dict[batch_id] = batch_data
self._active_batch_data_id = batch_id
def _load_batch_data_from_dict(self, batch_data_dict):
"""
Loads all data in batch_data_dict into load_batch_data
"""
for batch_id, batch_data in batch_data_dict.items():
self.load_batch_data(batch_id, batch_data)
def resolve_metrics(
self,
metrics_to_resolve: Iterable[MetricConfiguration],
metrics: Optional[Dict[Tuple, MetricConfiguration]] = None,
runtime_configuration: Optional[dict] = None,
) -> Dict[Tuple, Any]:
"""resolve_metrics is the main entrypoint for an execution engine. The execution engine will compute the value
of the provided metrics.
Args:
metrics_to_resolve: the metrics to evaluate
metrics: already-computed metrics currently available to the engine
runtime_configuration: runtime configuration information
Returns:
resolved_metrics (Dict): a dictionary with the values for the metrics that have just been resolved.
"""
if metrics is None:
metrics = {}
resolved_metrics: Dict[Tuple, MetricConfiguration] = {}
metric_fn_bundle = []
for metric_to_resolve in metrics_to_resolve:
metric_dependencies = {}
for k, v in metric_to_resolve.metric_dependencies.items():
if v.id in metrics:
metric_dependencies[k] = metrics[v.id]
elif self._caching and v.id in self._metric_cache:
metric_dependencies[k] = self._metric_cache[v.id]
else:
raise ge_exceptions.MetricError(
message=f'Missing metric dependency: {str(k)} for metric "{metric_to_resolve.metric_name}".'
)
metric_class, metric_fn = get_metric_provider(
metric_name=metric_to_resolve.metric_name, execution_engine=self
)
metric_provider_kwargs = {
"cls": metric_class,
"execution_engine": self,
"metric_domain_kwargs": metric_to_resolve.metric_domain_kwargs,
"metric_value_kwargs": metric_to_resolve.metric_value_kwargs,
"metrics": metric_dependencies,
"runtime_configuration": runtime_configuration,
}
if metric_fn is None:
try:
(
metric_fn,
compute_domain_kwargs,
accessor_domain_kwargs,
) = metric_dependencies.pop("metric_partial_fn")
except KeyError as e:
raise ge_exceptions.MetricError(
message=f'Missing metric dependency: {str(e)} for metric "{metric_to_resolve.metric_name}".'
)
metric_fn_bundle.append(
(
metric_to_resolve,
metric_fn,
compute_domain_kwargs,
accessor_domain_kwargs,
metric_provider_kwargs,
)
)
continue
metric_fn_type = getattr(
metric_fn, "metric_fn_type", MetricFunctionTypes.VALUE
)
if metric_fn_type in [
MetricPartialFunctionTypes.MAP_SERIES,
MetricPartialFunctionTypes.MAP_FN,
MetricPartialFunctionTypes.MAP_CONDITION_FN,
MetricPartialFunctionTypes.MAP_CONDITION_SERIES,
MetricPartialFunctionTypes.WINDOW_FN,
MetricPartialFunctionTypes.WINDOW_CONDITION_FN,
MetricPartialFunctionTypes.AGGREGATE_FN,
]:
# NOTE: 20201026 - JPC - we could use the fact that these metric functions return functions rather
# than data to optimize compute in the future
try:
resolved_metrics[metric_to_resolve.id] = metric_fn(
**metric_provider_kwargs
)
except (
ge_exceptions.MetricComputationError,
ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError,
) as e:
raise ge_exceptions.MetricResolutionError(
message=str(e), failed_metrics=(metric_to_resolve,)
)
elif metric_fn_type == MetricFunctionTypes.VALUE:
try:
resolved_metrics[metric_to_resolve.id] = metric_fn(
**metric_provider_kwargs
)
except (
ge_exceptions.MetricComputationError,
ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError,
) as e:
raise ge_exceptions.MetricResolutionError(
message=str(e), failed_metrics=(metric_to_resolve,)
)
else:
logger.warning(
f"Unrecognized metric function type while trying to resolve {str(metric_to_resolve.id)}"
)
try:
resolved_metrics[metric_to_resolve.id] = metric_fn(
**metric_provider_kwargs
)
except (
ge_exceptions.MetricComputationError,
ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError,
) as e:
raise ge_exceptions.MetricResolutionError(
message=str(e), failed_metrics=(metric_to_resolve,)
)
if len(metric_fn_bundle) > 0:
try:
new_resolved = self.resolve_metric_bundle(metric_fn_bundle)
resolved_metrics.update(new_resolved)
except (
ge_exceptions.MetricComputationError,
ge_exceptions.InvalidMetricAccessorDomainKwargsKeyError,
) as e:
raise ge_exceptions.MetricResolutionError(
message=str(e), failed_metrics=[x[0] for x in metric_fn_bundle]
)
if self._caching:
self._metric_cache.update(resolved_metrics)
return resolved_metrics
def resolve_metric_bundle(self, metric_fn_bundle):
"""Resolve a bundle of metrics with the same compute domain as part of a single trip to the compute engine."""
raise NotImplementedError
def get_domain_records(
self,
domain_kwargs: dict,
) -> Any:
"""
get_domain_records computes the full-access data (dataframe or selectable) for computing metrics based on the
given domain_kwargs and specific engine semantics.
Returns:
data corresponding to the compute domain
"""
raise NotImplementedError
def get_compute_domain(
self,
domain_kwargs: dict,
domain_type: Union[str, MetricDomainTypes],
) -> Tuple[Any, dict, dict]:
"""get_compute_domain computes the optimal domain_kwargs for computing metrics based on the given domain_kwargs
and specific engine semantics.
Returns:
A tuple consisting of three elements:
1. data corresponding to the compute domain;
2. a modified copy of domain_kwargs describing the domain of the data returned in (1);
3. a dictionary describing the access instructions for data elements included in the compute domain
(e.g. specific column name).
In general, the union of the compute_domain_kwargs and accessor_domain_kwargs will be the same as the domain_kwargs
provided to this method.
"""
raise NotImplementedError
def add_column_row_condition(
self, domain_kwargs, column_name=None, filter_null=True, filter_nan=False
):
"""EXPERIMENTAL
Add a row condition for handling null filter.
Args:
domain_kwargs: the domain kwargs to use as the base and to which to add the condition
column_name: if provided, use this name to add the condition; otherwise, will use "column" key from table_domain_kwargs
filter_null: if true, add a filter for null values
filter_nan: if true, add a filter for nan values
"""
if filter_null is False and filter_nan is False:
logger.warning(
"add_column_row_condition called with no filter condition requested"
)
return domain_kwargs
if filter_nan:
raise ge_exceptions.GreatExpectationsError(
"Base ExecutionEngine does not support adding nan condition filters"
)
if "row_condition" in domain_kwargs and domain_kwargs["row_condition"]:
raise ge_exceptions.GreatExpectationsError(
"ExecutionEngine does not support updating existing row_conditions."
)
new_domain_kwargs = copy.deepcopy(domain_kwargs)
assert "column" in domain_kwargs or column_name is not None
if column_name is not None:
column = column_name
else:
column = domain_kwargs["column"]
new_domain_kwargs["condition_parser"] = "great_expectations__experimental__"
new_domain_kwargs["row_condition"] = f'col("{column}").notnull()'
return new_domain_kwargs
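    # For illustration (hypothetical kwargs): given domain_kwargs={"column": "passenger_count"},
    # add_column_row_condition returns a copy extended with
    # condition_parser="great_expectations__experimental__" and
    # row_condition='col("passenger_count").notnull()'.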
class MetricPartialFunctionTypes(Enum):
MAP_FN = "map_fn"
MAP_SERIES = "map_series"
MAP_CONDITION_FN = "map_condition_fn"
MAP_CONDITION_SERIES = "map_condition_series"
WINDOW_FN = "window_fn"
WINDOW_CONDITION_FN = "window_condition_fn"
AGGREGATE_FN = "aggregate_fn"
@property
def metric_suffix(self):
if self.name in ["MAP_FN", "MAP_SERIES", "WINDOW_FN"]:
return "map"
elif self.name in [
"MAP_CONDITION_FN",
"MAP_CONDITION_SERIES",
"WINDOW_CONDITION_FN",
]:
return "condition"
elif self.name in ["AGGREGATE_FN"]:
return "aggregate_fn"
|
py | b409fc93083b7524b2f95e3a102140141b106f3c | import cv2
import numpy as np
from seaborn import color_palette
from PIL import Image, ImageDraw, ImageFont
def load_class_names(file_name):
"""
Returns a list of class names read from `file_name`.
"""
with open(file_name, 'r') as f:
class_names = f.read().splitlines()
return class_names
def draw_boxes(img_array, boxes_dicts, class_names, model_size):
"""
Draws detected boxes.
Args:
        img_array: A list of input images as numpy arrays.
        boxes_dicts: A list of class-to-boxes dictionaries, one per image.
class_names: A class names list.
model_size: The input size of the model.
"""
colors = ((np.array(color_palette("hls", 80)) * 255)).astype(np.uint8)
for num, img, boxes_dict in zip(range(len(img_array)), img_array,
boxes_dicts):
img = Image.fromarray(img)
draw = ImageDraw.Draw(img)
font = ImageFont.truetype(font='./detector/fonts/futur.ttf',
size=(img.size[0] + img.size[1]) // 100)
resize_factor = (
(img.size[0] / model_size[0], img.size[1] / model_size[1])
)
for cls in range(len(class_names)):
boxes = boxes_dict[cls]
if np.size(boxes) != 0:
color = colors[cls]
for box in boxes:
xy, confidence = box[:4], box[4]
xy = [xy[i] * resize_factor[i % 2] for i in range(4)]
x0, y0 = xy[0], xy[1]
thickness = (img.size[0] + img.size[1]) // 200
for t in np.linspace(0, 1, thickness):
xy[0], xy[1] = xy[0] + t, xy[1] + t
xy[2], xy[3] = xy[2] - t, xy[3] - t
draw.rectangle(xy, outline=tuple(color))
text = '{} {:.1f}%'.format(class_names[cls],
confidence * 100)
text_size = draw.textsize(text, font=font)
draw.rectangle(
[x0, y0 - text_size[1], x0 + text_size[0], y0],
fill=tuple(color))
draw.text((x0, y0 - text_size[1]), text, fill='black',
font=font)
cv2.imshow('screen', np.array(img))
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
|
py | b409fcea76940fe4a9afe88f9af60833af78b293 | import sys
from PyQt4.QtGui import QIntValidator, QDoubleValidator, QApplication, QSizePolicy
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
from Orange.data import Table, Domain, ContinuousVariable
import numpy as np
#try:
# from ..tools.xoppy_calc import xoppy_doc
#except ImportError:
# print("Error importing: xoppy_doc")
# raise
#try:
# from ..tools.xoppy_calc import xoppy_calc_BC_Mirror
#except ImportError:
# print("compute pressed.")
# print("Error importing: xoppy_calc_BC_Mirror")
# raise
class OWBC_Mirror(widget.OWWidget):
name = "BC_Mirror"
id = "orange.widgets.dataBC_Mirror"
description = "xoppy application to compute..."
icon = "icons/xoppy_BC_Mirror.png"
author = "create_widget.py"
maintainer_email = "[email protected]"
priority = 10
category = ""
keywords = ["xoppy", "BC_Mirror"]
outputs = [#{"name": "xoppy_data",
# "type": np.ndarray,
# "doc": ""},
{"name": "xoppy_table",
"type": Table,
"doc": ""},
{"name": "xoppy_specfile",
"type": str,
"doc": ""}]
#inputs = [{"name": "Name",
# "type": type,
# "handler": None,
# "doc": ""}]
want_main_area = False
coating = Setting("Rh")
thickness = Setting(1e-06)
density = Setting(10.0)
def __init__(self):
super().__init__()
box0 = gui.widgetBox(self.controlArea, " ",orientation="horizontal")
#widget buttons: compute, set defaults, help
gui.button(box0, self, "Compute", callback=self.compute)
gui.button(box0, self, "Defaults", callback=self.defaults)
gui.button(box0, self, "Help", callback=self.help1)
self.process_showers()
box = gui.widgetBox(self.controlArea, " ",orientation="vertical")
idx = -1
#widget index 0
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "coating",
label=self.unitLabels()[idx], addSpace=True)
self.show_at(self.unitFlags()[idx], box1)
#widget index 1
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "thickness",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
#widget index 2
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "density",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
self.process_showers()
gui.rubber(self.controlArea)
def unitLabels(self):
return ['coating material', 'coating thickness', 'coating density']
def unitFlags(self):
return ['True', 'True', 'True']
def compute(self):
print("compute executed.")
#table = Table.from_numpy(domain, out)
#self.send("xoppy_table",table)
def defaults(self):
self.resetSettings()
self.compute()
return
def help1(self):
print("help pressed.")
#xoppy_doc('BC_Mirror')
if __name__ == "__main__":
app = QApplication(sys.argv)
w = OWBC_Mirror()
w.show()
    app.exec_()
w.saveSettings()
|
py | b409fd4792839a18253c3032cf09d53ebebee586 | import h5py
import numpy as np
def SignNumpy(x):
return np.greater(x,0)
# convert a fully connected binarized layer plus batch normalization into
# the simplified form (binary weight and positive threshold)
# note that the neurons are assumed to be in the columns of the weight
# matrix
def makeBNComplex(after_bn_thres, fanin, beta, gamma, mean, invstd, use_rowmajor=False, usePopCount=True):
outs = fanin.shape[0]
print ("Extracting FCBN complex, outs = %d" % (outs))
# we'll fill in the binarized weights and thresholds iteratively
# w_bin = range(ins*outs)
    # use a list (not a range object) so that per-neuron thresholds can be assigned below
    thresholds = [0] * outs
for neuron in range(outs):
# compute a preliminary threshold from the batchnorm parameters
thres = mean[neuron] + ((after_bn_thres - beta[neuron]) / (abs(gamma[neuron]*invstd[neuron])+1e-4))
need_flip = 0
# ensure all neurons activate on the "positive" side, so we can use
# greater-than-threshold activation
# if gamma[neuron]*invstd[neuron] < 0:
# need_flip = 1
# thres = -thres
# if thres > 32767:
# thres = 32767
# if thres < -32768:
# thres = -32768
# turn threshold into "number of 1s" (popcount) instead of signed sum
if usePopCount:
#thresholds[neuron] = int((fanin[neuron] + thres) / 2)
thresholds[neuron] = (fanin[neuron] + thres) / 2
else:
thresholds[neuron] = thres
# # binarize the synapses
# for synapse in range(ins):
# # note how we change from col major to row major if requested
# dest_ind = neuron*ins+synapse if use_rowmajor else synapse*outs+neuron
# if need_flip:
# w_bin[dest_ind] = binarize(-weights[synapse][neuron])
# else:
# w_bin[dest_ind] = binarize(weights[synapse][neuron])
# # reshape the output as desired
# if use_rowmajor:
# w_bin = np.asarray(w_bin).reshape((outs, ins))
# else:
# w_bin = np.asarray(w_bin).reshape((ins, outs))
#return (w_bin, thresholds)
return thresholds
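# Worked example for makeBNComplex (illustrative numbers only): with after_bn_thres=0, fanin=256,
# mean=0.1, beta=0.2, gamma=1.5 and invstd=0.8 for one neuron,
#   thres = 0.1 + (0 - 0.2) / (abs(1.5 * 0.8) + 1e-4), roughly -0.067,
# and with usePopCount=True the stored threshold is (256 - 0.067) / 2, roughly 127.97, i.e. the
# neuron activates when about 128 of its 256 binary inputs are ones.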
# binarize and pack convolutional layer weights into a matrix and compute
# thresholds from the conv bias and batchnorm parameters
def makeConvBNComplex(fanin, beta, gamma, mean, invstd, interleaveChannels=False, usePopCount=True):
numOut = fanin.shape[0]
print ("Extracting conv-BN complex, OFM=%d" % (numOut))
# the fanin is used to ensure positive-only threshold
# w_bin = range(numOut * numIn * k * k)
# one threshold per output channel
    # use a list (not a range object) so that per-channel thresholds can be assigned below
    thresholds = [0] * numOut
# dest_ind = 0
# we'll fill in the binarized weights and thresholds iteratively
for neuron in range(numOut):
# compute a preliminary threshold from the batchnorm parameters,
# subtracting the conv bias from the batchnorm mean
thres = mean[neuron] - (beta[neuron] / (gamma[neuron]*invstd[neuron]))
# need_flip = 0
# ensure all neurons activate on the "positive" side, so we can use
# greater-than-threshold activation
if gamma[neuron]*invstd[neuron] < 0:
# need_flip = 1
thres = -thres
# turn threshold into "number of 1s" (popcount) instead of signed sum
if usePopCount:
thresholds[neuron] = int((fanin[neuron] + thres) / 2)
else:
thresholds[neuron] = thres
# # go through each weight of each convolutional kernel
# if interleaveChannels:
# for ky in range(k):
# for kx in range(k):
# for ifm in range(numIn):
# f = -1 if need_flip else +1
# w_bin[dest_ind] = binarize(f*weights[neuron][ifm][ky][kx])
# dest_ind += 1
# else:
# for ifm in range(numIn):
# for ky in range(k):
# for kx in range(k):
# f = -1 if need_flip else +1
# w_bin[dest_ind] = binarize(f*weights[neuron][ifm][ky][kx])
# dest_ind += 1
#
# # reshape the output as desired
# w_bin = np.asarray(w_bin).reshape((numOut, fanin))
# return (w_bin, thresholds)
return thresholds
if __name__ == "__main__":
print("Loading the pretrained parameters...")
bl = h5py.File("pretrained_network_5lut.h5", 'r')
#bl = h5py.File("dummy.h5", 'r')
# init model parameter lists
batch_norm_eps=1e-4
weights = []
gammas = []
means = []
pruning_masks = []
rand_maps = []
bn_betas = []
bn_gammas = []
bn_means = []
bn_inv_stds = []
# conv layer 1
bl_w1 = np.array(bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["Variable_1:0"])
bl_rand_map = np.array(bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["rand_map_0:0"])
bl_pruning_mask = np.array(bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["pruning_mask:0"]).reshape(bl_w1.shape)
bl_gamma = np.array(bl["model_weights"]["binary_conv_1"]["binary_conv_1"]["Variable:0"])
bl_bn_beta = np.array(bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["beta:0"])
bl_bn_gamma = np.array(bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["gamma:0"])
bl_bn_mean = np.array(bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_mean:0"])
bl_bn_inv_std = 1/np.sqrt(np.array(bl["model_weights"]["batch_normalization_1"]["batch_normalization_1"]["moving_variance:0"])+batch_norm_eps)
bl_means = bl["model_weights"]["residual_sign_1"]["residual_sign_1"]["means:0"]
##Pruning
#bl_w1 = bl_w1 * np.reshape(bl_pruning_mask, (bl_w1.shape))
w_lut = [bl_w1]
#weights = [weights, w_lut]
weights = [w_lut]
#gammas = [gammas, bl_gamma]
gammas=[bl_gamma]
#pruning_masks = [pruning_masks, bl_pruning_mask]
pruning_masks=[bl_pruning_mask]
#rand_maps = [rand_maps, bl_rand_map]
rand_maps=[bl_rand_map]
#means = [means, bl_means]
means=[bl_means]
#bn_betas = [bn_betas, bl_bn_beta]
bn_betas=[bl_bn_beta]
#bn_gammas = [bn_gammas, bl_bn_gamma]
bn_gammas=[bl_bn_gamma]
#bn_means = [bn_means, bl_bn_mean]
bn_means=[bl_bn_mean]
#bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]
bn_inv_stds=[bl_bn_inv_std]
# conv layer 2
bl_w1 = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_1:0"])
#bl_w2 = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_2:0"])
#bl_w3 = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_3:0"])
#bl_w4 = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_4:0"])
#bl_w5 = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_5:0"])
#bl_w6 = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_6:0"])
#bl_w7 = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_7:0"])
#bl_w8 = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable_8:0"])
bl_rand_map = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["rand_map_0:0"])
bl_pruning_mask = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["pruning_mask:0"]).reshape(bl_w1.shape)
bl_gamma = np.array(bl["model_weights"]["binary_conv_2"]["binary_conv_2"]["Variable:0"])
bl_bn_beta = np.array(bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["beta:0"])
bl_bn_gamma = np.array(bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["gamma:0"])
bl_bn_mean = np.array(bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_mean:0"])
bl_bn_inv_std = 1/np.sqrt(np.array(bl["model_weights"]["batch_normalization_2"]["batch_normalization_2"]["moving_variance:0"])+batch_norm_eps)
bl_means = bl["model_weights"]["residual_sign_2"]["residual_sign_2"]["means:0"]
#w_lut = [bl_w1*bl_pruning_mask, bl_w2*bl_pruning_mask, bl_w3*bl_pruning_mask, bl_w4*bl_pruning_mask, bl_w5*bl_pruning_mask, bl_w6*bl_pruning_mask, bl_w7*bl_pruning_mask, bl_w8*bl_pruning_mask]
w_lut = [bl_w1]
#weights = [weights, w_lut]
weights.extend([w_lut])
#gammas = [gammas, bl_gamma]
gammas.extend([bl_gamma])
#pruning_masks = [pruning_masks, bl_pruning_mask]
pruning_masks.extend([bl_pruning_mask])
#rand_maps = [rand_maps, bl_rand_map]
rand_maps.extend([bl_rand_map])
#means = [means, bl_means]
means.extend([bl_means])
#bn_betas = [bn_betas, bl_bn_beta]
bn_betas.extend([bl_bn_beta])
#bn_gammas = [bn_gammas, bl_bn_gamma]
bn_gammas.extend([bl_bn_gamma])
#bn_means = [bn_means, bl_bn_mean]
bn_means.extend([bl_bn_mean])
#bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]
bn_inv_stds.extend([bl_bn_inv_std])
# conv layer 3
bl_w1 = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_1:0"])
#bl_w2 = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_2:0"])
#bl_w3 = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_3:0"])
#bl_w4 = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_4:0"])
#bl_w5 = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_5:0"])
#bl_w6 = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_6:0"])
#bl_w7 = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_7:0"])
#bl_w8 = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable_8:0"])
bl_rand_map = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["rand_map_0:0"])
bl_pruning_mask = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["pruning_mask:0"]).reshape(bl_w1.shape)
bl_gamma = np.array(bl["model_weights"]["binary_conv_3"]["binary_conv_3"]["Variable:0"])
bl_bn_beta = np.array(bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["beta:0"])
bl_bn_gamma = np.array(bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["gamma:0"])
bl_bn_mean = np.array(bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_mean:0"])
bl_bn_inv_std = 1/np.sqrt(np.array(bl["model_weights"]["batch_normalization_3"]["batch_normalization_3"]["moving_variance:0"])+batch_norm_eps)
bl_means = bl["model_weights"]["residual_sign_3"]["residual_sign_3"]["means:0"]
#w_lut = [bl_w1*bl_pruning_mask, bl_w2*bl_pruning_mask, bl_w3*bl_pruning_mask, bl_w4*bl_pruning_mask, bl_w5*bl_pruning_mask, bl_w6*bl_pruning_mask, bl_w7*bl_pruning_mask, bl_w8*bl_pruning_mask]
w_lut = [bl_w1]
#weights = [weights, w_lut]
weights.extend([w_lut])
#gammas = [gammas, bl_gamma]
gammas.extend([bl_gamma])
#pruning_masks = [pruning_masks, bl_pruning_mask]
pruning_masks.extend([bl_pruning_mask])
#rand_maps = [rand_maps, bl_rand_map]
rand_maps.extend([bl_rand_map])
#means = [means, bl_means]
means.extend([bl_means])
#bn_betas = [bn_betas, bl_bn_beta]
bn_betas.extend([bl_bn_beta])
#bn_gammas = [bn_gammas, bl_bn_gamma]
bn_gammas.extend([bl_bn_gamma])
#bn_means = [bn_means, bl_bn_mean]
bn_means.extend([bl_bn_mean])
#bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]
bn_inv_stds.extend([bl_bn_inv_std])
# conv layer 4
bl_w1 = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_1:0"])
#bl_w2 = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_2:0"])
#bl_w3 = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_3:0"])
#bl_w4 = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_4:0"])
#bl_w5 = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_5:0"])
#bl_w6 = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_6:0"])
#bl_w7 = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_7:0"])
#bl_w8 = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable_8:0"])
bl_rand_map = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["rand_map_0:0"])
bl_pruning_mask = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["pruning_mask:0"]).reshape(bl_w1.shape)
bl_gamma = np.array(bl["model_weights"]["binary_conv_4"]["binary_conv_4"]["Variable:0"])
bl_bn_beta = np.array(bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["beta:0"])
bl_bn_gamma = np.array(bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["gamma:0"])
bl_bn_mean = np.array(bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_mean:0"])
bl_bn_inv_std = 1/np.sqrt(np.array(bl["model_weights"]["batch_normalization_4"]["batch_normalization_4"]["moving_variance:0"])+batch_norm_eps)
bl_means = bl["model_weights"]["residual_sign_4"]["residual_sign_4"]["means:0"]
#w_lut = [bl_w1*bl_pruning_mask, bl_w2*bl_pruning_mask, bl_w3*bl_pruning_mask, bl_w4*bl_pruning_mask, bl_w5*bl_pruning_mask, bl_w6*bl_pruning_mask, bl_w7*bl_pruning_mask, bl_w8*bl_pruning_mask]
w_lut = [bl_w1]
#weights = [weights, w_lut]
weights.extend([w_lut])
#gammas = [gammas, bl_gamma]
gammas.extend([bl_gamma])
#pruning_masks = [pruning_masks, bl_pruning_mask]
pruning_masks.extend([bl_pruning_mask])
#rand_maps = [rand_maps, bl_rand_map]
rand_maps.extend([bl_rand_map])
#means = [means, bl_means]
means.extend([bl_means])
#bn_betas = [bn_betas, bl_bn_beta]
bn_betas.extend([bl_bn_beta])
#bn_gammas = [bn_gammas, bl_bn_gamma]
bn_gammas.extend([bl_bn_gamma])
#bn_means = [bn_means, bl_bn_mean]
bn_means.extend([bl_bn_mean])
#bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]
bn_inv_stds.extend([bl_bn_inv_std])
# conv layer 5
bl_w1 = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_1:0"])
#bl_w2 = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_2:0"])
#bl_w3 = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_3:0"])
#bl_w4 = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_4:0"])
#bl_w5 = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_5:0"])
#bl_w6 = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_6:0"])
#bl_w7 = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_7:0"])
#bl_w8 = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable_8:0"])
bl_rand_map = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["rand_map_0:0"])
bl_pruning_mask = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["pruning_mask:0"]).reshape(bl_w1.shape)
bl_gamma = np.array(bl["model_weights"]["binary_conv_5"]["binary_conv_5"]["Variable:0"])
bl_bn_beta = np.array(bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["beta:0"])
bl_bn_gamma = np.array(bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["gamma:0"])
bl_bn_mean = np.array(bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_mean:0"])
bl_bn_inv_std = 1/np.sqrt(np.array(bl["model_weights"]["batch_normalization_5"]["batch_normalization_5"]["moving_variance:0"])+batch_norm_eps)
bl_means = bl["model_weights"]["residual_sign_5"]["residual_sign_5"]["means:0"]
#w_lut = [bl_w1*bl_pruning_mask, bl_w2*bl_pruning_mask, bl_w3*bl_pruning_mask, bl_w4*bl_pruning_mask, bl_w5*bl_pruning_mask, bl_w6*bl_pruning_mask, bl_w7*bl_pruning_mask, bl_w8*bl_pruning_mask]
w_lut = [bl_w1]
#weights = [weights, w_lut]
weights.extend([w_lut])
#gammas = [gammas, bl_gamma]
gammas.extend([bl_gamma])
#pruning_masks = [pruning_masks, bl_pruning_mask]
pruning_masks.extend([bl_pruning_mask])
#rand_maps = [rand_maps, bl_rand_map]
rand_maps.extend([bl_rand_map])
#means = [means, bl_means]
means.extend([bl_means])
#bn_betas = [bn_betas, bl_bn_beta]
bn_betas.extend([bl_bn_beta])
#bn_gammas = [bn_gammas, bl_bn_gamma]
bn_gammas.extend([bl_bn_gamma])
#bn_means = [bn_means, bl_bn_mean]
bn_means.extend([bl_bn_mean])
#bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]
bn_inv_stds.extend([bl_bn_inv_std])
# conv layer 6
bl_w1 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_1:0"])
bl_w2 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_2:0"])
bl_w3 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_3:0"])
bl_w4 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_4:0"])
bl_w5 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_5:0"])
bl_w6 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_6:0"])
bl_w7 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_7:0"])
bl_w8 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_8:0"])
bl_w9 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_9:0"])
bl_w10 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_10:0"])
bl_w11 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_11:0"])
bl_w12 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_12:0"])
bl_w13 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_13:0"])
bl_w14 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_14:0"])
bl_w15 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_15:0"])
bl_w16 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_16:0"])
bl_w17 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_17:0"])
bl_w18 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_18:0"])
bl_w19 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_19:0"])
bl_w20 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_20:0"])
bl_w21 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_21:0"])
bl_w22 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_22:0"])
bl_w23 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_23:0"])
bl_w24 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_24:0"])
bl_w25 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_25:0"])
bl_w26 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_26:0"])
bl_w27 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_27:0"])
bl_w28 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_28:0"])
bl_w29 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_29:0"])
bl_w30 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_30:0"])
bl_w31 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_31:0"])
bl_w32 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_32:0"])
bl_w33 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_33:0"])
bl_w34 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_34:0"])
bl_w35 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_35:0"])
bl_w36 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_36:0"])
bl_w37 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_37:0"])
bl_w38 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_38:0"])
bl_w39 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_39:0"])
bl_w40 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_40:0"])
bl_w41 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_41:0"])
bl_w42 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_42:0"])
bl_w43 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_43:0"])
bl_w44 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_44:0"])
bl_w45 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_45:0"])
bl_w46 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_46:0"])
bl_w47 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_47:0"])
bl_w48 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_48:0"])
bl_w49 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_49:0"])
bl_w50 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_50:0"])
bl_w51 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_51:0"])
bl_w52 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_52:0"])
bl_w53 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_53:0"])
bl_w54 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_54:0"])
bl_w55 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_55:0"])
bl_w56 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_56:0"])
bl_w57 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_57:0"])
bl_w58 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_58:0"])
bl_w59 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_59:0"])
bl_w60 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_60:0"])
bl_w61 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_61:0"])
bl_w62 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_62:0"])
bl_w63 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_63:0"])
bl_w64 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable_64:0"])
bl_rand_map_0 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["rand_map_0:0"])
bl_rand_map_1 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["rand_map_1:0"])
bl_rand_map_2 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["rand_map_2:0"])
bl_rand_map_3 = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["rand_map_3:0"])
bl_pruning_mask = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["pruning_mask:0"]).reshape(bl_w1.shape)
bl_gamma = np.array(bl["model_weights"]["binary_conv_6"]["binary_conv_6"]["Variable:0"])
bl_bn_beta = np.array(bl["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["beta:0"])
bl_bn_gamma = np.array(bl["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["gamma:0"])
bl_bn_mean = np.array(bl["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["moving_mean:0"])
bl_bn_inv_std = 1/np.sqrt(np.array(bl["model_weights"]["batch_normalization_6"]["batch_normalization_6"]["moving_variance:0"])+batch_norm_eps)
bl_means = bl["model_weights"]["residual_sign_6"]["residual_sign_6"]["means:0"]
w_lut = [bl_w1*bl_pruning_mask, bl_w2*bl_pruning_mask, bl_w3*bl_pruning_mask, bl_w4*bl_pruning_mask, bl_w5*bl_pruning_mask, bl_w6*bl_pruning_mask, bl_w7*bl_pruning_mask, bl_w8*bl_pruning_mask, bl_w9*bl_pruning_mask, bl_w10*bl_pruning_mask, bl_w11*bl_pruning_mask, bl_w12*bl_pruning_mask, bl_w13*bl_pruning_mask, bl_w14*bl_pruning_mask, bl_w15*bl_pruning_mask, bl_w16*bl_pruning_mask, bl_w17*bl_pruning_mask, bl_w18*bl_pruning_mask, bl_w19*bl_pruning_mask, bl_w20*bl_pruning_mask, bl_w21*bl_pruning_mask, bl_w22*bl_pruning_mask, bl_w23*bl_pruning_mask, bl_w24*bl_pruning_mask, bl_w25*bl_pruning_mask, bl_w26*bl_pruning_mask, bl_w27*bl_pruning_mask, bl_w28*bl_pruning_mask, bl_w29*bl_pruning_mask, bl_w30*bl_pruning_mask, bl_w31*bl_pruning_mask, bl_w32*bl_pruning_mask, bl_w33*bl_pruning_mask, bl_w34*bl_pruning_mask, bl_w35*bl_pruning_mask, bl_w36*bl_pruning_mask, bl_w37*bl_pruning_mask, bl_w38*bl_pruning_mask, bl_w39*bl_pruning_mask, bl_w40*bl_pruning_mask, bl_w41*bl_pruning_mask, bl_w42*bl_pruning_mask, bl_w43*bl_pruning_mask, bl_w44*bl_pruning_mask, bl_w45*bl_pruning_mask, bl_w46*bl_pruning_mask, bl_w47*bl_pruning_mask, bl_w48*bl_pruning_mask, bl_w49*bl_pruning_mask, bl_w50*bl_pruning_mask, bl_w51*bl_pruning_mask, bl_w52*bl_pruning_mask, bl_w53*bl_pruning_mask, bl_w54*bl_pruning_mask, bl_w55*bl_pruning_mask, bl_w56*bl_pruning_mask, bl_w57*bl_pruning_mask, bl_w58*bl_pruning_mask, bl_w59*bl_pruning_mask, bl_w60*bl_pruning_mask, bl_w61*bl_pruning_mask, bl_w62*bl_pruning_mask, bl_w63*bl_pruning_mask, bl_w64*bl_pruning_mask]
#weights = [weights, w_lut]
weights.extend([w_lut])
#gammas = [gammas, bl_gamma]
gammas.extend([bl_gamma])
#pruning_masks = [pruning_masks, bl_pruning_mask]
pruning_masks.extend([bl_pruning_mask])
bl_rand_map = [bl_rand_map_0, bl_rand_map_1, bl_rand_map_2, bl_rand_map_3]
#rand_maps = [rand_maps, bl_rand_map]
rand_maps.extend([bl_rand_map])
#means = [means, bl_means]
means.extend([bl_means])
#bn_betas = [bn_betas, bl_bn_beta]
bn_betas.extend([bl_bn_beta])
#bn_gammas = [bn_gammas, bl_bn_gamma]
bn_gammas.extend([bl_bn_gamma])
#bn_means = [bn_means, bl_bn_mean]
bn_means.extend([bl_bn_mean])
#bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]
bn_inv_stds.extend([bl_bn_inv_std])
# dense layer 1
bl_w1 = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_1:0"])
#bl_w2 = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_2:0"])
#bl_w3 = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_3:0"])
#bl_w4 = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_4:0"])
#bl_w5 = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_5:0"])
#bl_w6 = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_6:0"])
#bl_w7 = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_7:0"])
#bl_w8 = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable_8:0"])
bl_rand_map = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["rand_map:0"])
bl_pruning_mask = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["pruning_mask:0"]).reshape(bl_w1.shape)
bl_gamma = np.array(bl["model_weights"]["binary_dense_1"]["binary_dense_1"]["Variable:0"])
bl_bn_beta = np.array(bl["model_weights"]["batch_normalization_7"]["batch_normalization_7"]["beta:0"])
bl_bn_gamma = np.array(bl["model_weights"]["batch_normalization_7"]["batch_normalization_7"]["gamma:0"])
bl_bn_mean = np.array(bl["model_weights"]["batch_normalization_7"]["batch_normalization_7"]["moving_mean:0"])
bl_bn_inv_std = 1/np.sqrt(np.array(bl["model_weights"]["batch_normalization_7"]["batch_normalization_7"]["moving_variance:0"])+batch_norm_eps)
bl_means = bl["model_weights"]["residual_sign_7"]["residual_sign_7"]["means:0"]
#w_lut = [bl_w1*bl_pruning_mask, bl_w2*bl_pruning_mask, bl_w3*bl_pruning_mask, bl_w4*bl_pruning_mask, bl_w5*bl_pruning_mask, bl_w6*bl_pruning_mask, bl_w7*bl_pruning_mask, bl_w8*bl_pruning_mask]
w_lut = [bl_w1]
#weights = [weights, w_lut]
weights.extend([w_lut])
#gammas = [gammas, bl_gamma]
gammas.extend([bl_gamma])
#pruning_masks = [pruning_masks, bl_pruning_mask]
pruning_masks.extend([bl_pruning_mask])
#rand_maps = [rand_maps, bl_rand_map]
rand_maps.extend([bl_rand_map])
#means = [means, bl_means]
means.extend([bl_means])
#bn_betas = [bn_betas, bl_bn_beta]
bn_betas.extend([bl_bn_beta])
#bn_gammas = [bn_gammas, bl_bn_gamma]
bn_gammas.extend([bl_bn_gamma])
#bn_means = [bn_means, bl_bn_mean]
bn_means.extend([bl_bn_mean])
#bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]
bn_inv_stds.extend([bl_bn_inv_std])
# dense layer 2
bl_w1 = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_1:0"])
#bl_w2 = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_2:0"])
#bl_w3 = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_3:0"])
#bl_w4 = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_4:0"])
#bl_w5 = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_5:0"])
#bl_w6 = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_6:0"])
#bl_w7 = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_7:0"])
#bl_w8 = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable_8:0"])
bl_rand_map = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["rand_map:0"])
bl_pruning_mask = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["pruning_mask:0"]).reshape(bl_w1.shape)
bl_gamma = np.array(bl["model_weights"]["binary_dense_2"]["binary_dense_2"]["Variable:0"])
bl_bn_beta = np.array(bl["model_weights"]["batch_normalization_8"]["batch_normalization_8"]["beta:0"])
bl_bn_gamma = np.array(bl["model_weights"]["batch_normalization_8"]["batch_normalization_8"]["gamma:0"])
bl_bn_mean = np.array(bl["model_weights"]["batch_normalization_8"]["batch_normalization_8"]["moving_mean:0"])
bl_bn_inv_std = 1/np.sqrt(np.array(bl["model_weights"]["batch_normalization_8"]["batch_normalization_8"]["moving_variance:0"])+batch_norm_eps)
bl_means = bl["model_weights"]["residual_sign_8"]["residual_sign_8"]["means:0"]
#w_lut = [bl_w1*bl_pruning_mask, bl_w2*bl_pruning_mask, bl_w3*bl_pruning_mask, bl_w4*bl_pruning_mask, bl_w5*bl_pruning_mask, bl_w6*bl_pruning_mask, bl_w7*bl_pruning_mask, bl_w8*bl_pruning_mask]
w_lut = [bl_w1]
#weights = [weights, w_lut]
weights.extend([w_lut])
#gammas = [gammas, bl_gamma]
gammas.extend([bl_gamma])
#pruning_masks = [pruning_masks, bl_pruning_mask]
pruning_masks.extend([bl_pruning_mask])
#rand_maps = [rand_maps, bl_rand_map]
rand_maps.extend([bl_rand_map])
#means = [means, bl_means]
means.extend([bl_means])
#bn_betas = [bn_betas, bl_bn_beta]
bn_betas.extend([bl_bn_beta])
#bn_gammas = [bn_gammas, bl_bn_gamma]
bn_gammas.extend([bl_bn_gamma])
#bn_means = [bn_means, bl_bn_mean]
bn_means.extend([bl_bn_mean])
#bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]
bn_inv_stds.extend([bl_bn_inv_std])
# dense layer 3
bl_w1 = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_1:0"])
#bl_w2 = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_2:0"])
#bl_w3 = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_3:0"])
#bl_w4 = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_4:0"])
#bl_w5 = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_5:0"])
#bl_w6 = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_6:0"])
#bl_w7 = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_7:0"])
#bl_w8 = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable_8:0"])
bl_rand_map = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["rand_map:0"])
bl_pruning_mask = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["pruning_mask:0"]).reshape(bl_w1.shape)
bl_gamma = np.array(bl["model_weights"]["binary_dense_3"]["binary_dense_3"]["Variable:0"])
bl_bn_beta = np.array(bl["model_weights"]["batch_normalization_9"]["batch_normalization_9"]["beta:0"])
bl_bn_gamma = np.array(bl["model_weights"]["batch_normalization_9"]["batch_normalization_9"]["gamma:0"])
bl_bn_mean = np.array(bl["model_weights"]["batch_normalization_9"]["batch_normalization_9"]["moving_mean:0"])
bl_bn_inv_std = 1/np.sqrt(np.array(bl["model_weights"]["batch_normalization_9"]["batch_normalization_9"]["moving_variance:0"])+batch_norm_eps)
#bl_means = bl["model_weights"]["residual_sign_9"]["residual_sign_9"]["means:0"]
#w_lut = [bl_w1*bl_pruning_mask, bl_w2*bl_pruning_mask, bl_w3*bl_pruning_mask, bl_w4*bl_pruning_mask, bl_w5*bl_pruning_mask, bl_w6*bl_pruning_mask, bl_w7*bl_pruning_mask, bl_w8*bl_pruning_mask]
w_lut = [bl_w1]
#weights = [weights, w_lut]
weights.extend([w_lut])
#gammas = [gammas, bl_gamma]
gammas.extend([bl_gamma])
#pruning_masks = [pruning_masks, bl_pruning_mask]
pruning_masks.extend([bl_pruning_mask])
#rand_maps = [rand_maps, bl_rand_map]
rand_maps.extend([bl_rand_map])
#means = [means, bl_means]
#means.extend(bl_means)
#bn_betas = [bn_betas, bl_bn_beta]
bn_betas.extend([bl_bn_beta])
#bn_gammas = [bn_gammas, bl_bn_gamma]
bn_gammas.extend([bl_bn_gamma])
#bn_means = [bn_means, bl_bn_mean]
bn_means.extend([bl_bn_mean])
#bn_inv_stds = [bn_inv_stds, bl_bn_inv_std]
bn_inv_stds.extend([bl_bn_inv_std])
print("Binarizing the pretrained parameters...")
# Binarize the weights
weights[0][0] = SignNumpy(weights[0][0])
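    # layers are indexed 0-8; index 5 (the LUT-based conv layer 6) holds 64 weight tensors per activation, all other layers hold a single tensor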
for i in range(1,9):
if i==5:
for j in range(64):
weights[i][j] = SignNumpy(weights[i][j])
else:
for j in range(1):
weights[i][j] = SignNumpy(weights[i][j])
# write header file
with open('../src/weights.h', 'w') as f:
f.write('#pragma once\n')
with open('../src/weights.h', 'a') as f:
f.write('//Generated weights for CIFAR-10\n')
for layer_id in range(9):
# generate weights
if layer_id!=5: # first layer: fxp inputs and binary weights
weights_per_act = 1
else:
weights_per_act = 64 # weights_per_act = #_of_bits_per_act x 2 ^ #_of_lut_inputs
dims = np.shape(weights[layer_id][0])
if len(dims)==2:
layer_type = "fc"
word_length = dims[0]
nfilters = dims[1]
elif len(dims)==4:
layer_type = "conv"
word_length = dims[0]*dims[1]*dims[2]
nfilters = dims[3]
# for weight_id in range(weights_per_act):
# mat = weights[layer_id][weight_id]
# if layer_type=="fc":
# mat_flat = mat.transpose(1,0).flatten()
# elif layer_type=="conv":
# mat_flat = mat.transpose(3,0,1,2).flatten()
# else:
# print("unknown weight format!")
#
# with open('../src/weights.h', 'a') as f:
# f.write('//Array shape: {}\n'.format(dims))
# fold = (word_length-1)/32 + 1
# f.write("const ap_uint<32> " + "weights_" + layer_type + str(layer_id+1) + "_" + str(weight_id+1) + "["+str(nfilters*fold) + "] = {")
# bin_append = 0
# for i, ele in enumerate(mat_flat):
# #bin_append = (bin_append << 1) | (int(ele) # left-first bit-push
# bin_append = bin_append | (int(ele) << (i % word_length)) # right-first bit-push
# if (i % word_length == (word_length - 1)):
# mask = 0xFFFFFFFF
# for i_32b in range(fold):
# #word = (bin_append>>(32*(fold-i_32b-1))) & mask # Big-endian: left-first word-push
# word = (bin_append>>(32*i_32b)) & mask # Little-endian: right-first word-push
# hex_word = '%X' % word
# if i_32b!=0:
# f.write(', ')
# f.write('0x' + hex_word)
# bin_append = 0
# if i != nfilters*word_length-1:
# f.write(', ')
# f.write('};\n')
if layer_id==5:
# generate verilog source file for LUTARRAY: Vivado HLS will take forever
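            # two modules are emitted (apparently one per residual activation bit): LUTARRAY_b1 consumes LUT weight tensors 0-31, LUTARRAY_b0 consumes tensors 32-63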
with open('../src/LUTARRAY_b0_' + str(layer_id) + '.v', 'w') as v0:
v0.write('`timescale 1 ns / 1 ps\n\n')
v0.write('module LUTARRAY_b0 (\n in_V,\n in_1_V,\n in_2_V,\n in_3_V,\n in_4_V')
for tm in range(nfilters):
v0.write(',\n ap_return_' + str(tm))
v0.write(');\n\n')
with open('../src/LUTARRAY_b1_' + str(layer_id) + '.v', 'w') as v1:
v1.write('`timescale 1 ns / 1 ps\n\n')
v1.write('module LUTARRAY_b1 (\n in_V,\n in_1_V,\n in_2_V,\n in_3_V,\n in_4_V')
for tm in range(nfilters):
v1.write(',\n ap_return_' + str(tm))
v1.write(');\n\n')
mat_flat = []
for weight_id in range(weights_per_act):
mat = weights[layer_id][weight_id]
pm = pruning_masks[layer_id]#.transpose(3,0,1,2).flatten()
if layer_type=="fc":
mat = mat.transpose(1,0)
pm_flat = pm.transpose(1,0)
elif layer_type=="conv":
mat = mat.transpose(3,0,1,2).reshape((nfilters, -1))
pm_flat = pm.transpose(3,0,1,2).reshape((nfilters, -1))
else:
print("unknown weight format!")
mat_flat.extend([mat])
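            # for every output filter (tm) and every unpruned weight position (ti), a 32-term sum-of-products over the five select inputs is emitted; pruned positions are tied to 1'b0 in the packed ap_return vector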
with open('../src/LUTARRAY_b0_' + str(layer_id) + '.v', 'a') as v0:
v0.write('\n\n')
v0.write('input [' + str(word_length-1) + ':0] in_V;\n')
v0.write('input [' + str(word_length-1) + ':0] in_1_V;\n')
v0.write('input [' + str(word_length-1) + ':0] in_2_V;\n')
v0.write('input [' + str(word_length-1) + ':0] in_3_V;\n')
v0.write('input [' + str(word_length-1) + ':0] in_4_V;\n')
for tm in range(nfilters):
v0.write('output [' + str(word_length-1) + ':0] ap_return_' + str(tm) + ';\n')
for tm in range(nfilters):
for ti, ele in enumerate(pm_flat[tm]):
if ele==1:
v0.write('wire tmp_' + str(tm) + '_' + str(ti) + ';\n')
v0.write('assign tmp_' + str(tm) + '_' + str(ti) + ' = ')
v0.write('(' + str(int(mat_flat[32][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[33][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[34][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[35][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[36][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[37][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[38][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[39][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[40][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[41][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[42][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[43][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[44][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[45][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[46][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[47][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[48][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[49][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[50][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[51][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[52][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[53][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[54][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[55][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[56][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[57][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[58][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[59][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[60][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[61][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[62][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v0.write('(' + str(int(mat_flat[63][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']);\n ')
v0.write('assign ap_return_' + str(tm) + ' = {')
for ti, ele in enumerate(pm_flat[tm]):
if ele == 0:
v0.write("1'b0")
elif ele == 1:
v0.write('tmp_' + str(tm) + '_' + str(ti))
else:
print("pruning mask elements must be binary!")
if ti != word_length-1:
v0.write(',')
else:
v0.write('};\n')
v0.write('endmodule')
with open('../src/LUTARRAY_b1_' + str(layer_id) + '.v', 'a') as v1:
v1.write('\n\n')
v1.write('input [' + str(word_length-1) + ':0] in_V;\n')
v1.write('input [' + str(word_length-1) + ':0] in_1_V;\n')
v1.write('input [' + str(word_length-1) + ':0] in_2_V;\n')
v1.write('input [' + str(word_length-1) + ':0] in_3_V;\n')
v1.write('input [' + str(word_length-1) + ':0] in_4_V;\n')
for tm in range(nfilters):
v1.write('output [' + str(word_length-1) + ':0] ap_return_' + str(tm) + ';\n')
for tm in range(nfilters):
for ti, ele in enumerate(pm_flat[tm]):
if ele==1:
v1.write('wire tmp_' + str(tm) + '_' + str(ti) + ';\n')
v1.write('assign tmp_' + str(tm) + '_' + str(ti) + ' = ')
v1.write('(' + str(int(mat_flat[0][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[1][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[2][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[3][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[4][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[5][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[6][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[7][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[8][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[9][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[10][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[11][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[12][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[13][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[14][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[15][tm][ti])) + ' & in_V[' + str(ti) + '] & in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[16][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[17][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[18][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[19][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[20][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[21][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[22][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[23][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[24][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[25][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[26][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[27][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[28][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[29][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[30][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']) | ')
v1.write('(' + str(int(mat_flat[31][tm][ti])) + ' & in_V[' + str(ti) + '] & ~in_1_V[' + str(ti) + '] & ~in_2_V[' + str(ti) + '] & ~in_3_V[' + str(ti) + '] & ~in_4_V[' + str(ti) + ']);\n ')
v1.write('assign ap_return_' + str(tm) + ' = {')
for ti, ele in enumerate(pm_flat[tm]):
if ele == 0:
v1.write("1'b0")
elif ele == 1:
v1.write('tmp_' + str(tm) + '_' + str(ti))
else:
print("pruning mask elements must be binary!")
if ti != word_length-1:
v1.write(',')
else:
v1.write('};\n')
v1.write('endmodule')
# generate pruning mask (first layer only)
if layer_id==0:
pruning_mask_flat = pruning_masks[layer_id].transpose(3,0,1,2).flatten()
with open('../src/weights.h', 'a') as f:
fold = (word_length-1)/32 + 1
f.write("const ap_uint<32> " + "pruning_mask_" + layer_type + str(layer_id+1) + "_" + str(1) + "["+str(nfilters*fold) + "] = {")
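                # each filter's word_length-bit mask row is packed into fold = ceil(word_length/32) 32-bit words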
bin_append = 0
for i, ele in enumerate(pruning_mask_flat):
                    #bin_append = (bin_append << 1) | int(ele) # left-first bit-push
bin_append = bin_append | (int(ele) << (i % word_length)) # right-first bit-push
if (i % word_length == (word_length - 1)):
mask = 0xFFFFFFFF
for i_32b in range(fold):
#word = (bin_append>>(32*(fold-i_32b-1))) & mask # Big-endian: left-first word-push
word = (bin_append>>(32*i_32b)) & mask # Little-endian: right-first word-push
hex_word = '%X' % word
if i_32b!=0:
f.write(', ')
f.write('0x' + hex_word)
bin_append = 0
if i != nfilters*word_length-1:
f.write(', ')
f.write('};\n')
# generate threshold
if layer_id!=8: # the last layer does not need threshold
use_popcount = not(layer_id==0)
next_means_b0 = abs(means[layer_id][0])
print(next_means_b0)
next_means_b1 = abs(means[layer_id][1])
print(next_means_b1)
if layer_type=="conv":
fanin = np.sum(pruning_masks[layer_id].reshape(-1,dims[3]),axis=0)
elif layer_type=="fc":
fanin = np.sum(pruning_masks[layer_id],axis=0)
if layer_id!=0:
fanin = fanin * abs(gammas[layer_id] * means[layer_id-1][0]) + fanin * abs(gammas[layer_id] * means[layer_id-1][1])
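            # makeBNComplex folds the batch-norm parameters into per-channel comparison constants; subtracting the plain thresholds from a second folding evaluated at the next layer's residual mean gives the next_layer_means offsets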
thresholds = np.array(makeBNComplex(0, fanin, bn_betas[layer_id], bn_gammas[layer_id], bn_means[layer_id], bn_inv_stds[layer_id], usePopCount=use_popcount))
next_means_bn_b0 = np.array(makeBNComplex(next_means_b0, fanin, bn_betas[layer_id], bn_gammas[layer_id], bn_means[layer_id], bn_inv_stds[layer_id], usePopCount=use_popcount)) - thresholds
with open('../src/weights.h', 'a') as f:
f.write("const ap_fixed<24, 16> " + "thresh_" + layer_type + str(layer_id+1) + "["+str(len(thresholds))+"] = {")
for i, ele in enumerate(thresholds):
if i == 0:
f.write(str(ele))
else:
f.write(','+ str(ele))
f.write('};\n')
f.write("const ap_fixed<24, 16> " + "next_layer_means_" + layer_type + str(layer_id+1) + "["+str(len(next_means_bn_b0))+"] = {")
for i, ele in enumerate(next_means_bn_b0):
if i == 0:
f.write(str(ele))
else:
f.write(','+ str(ele))
f.write('};\n')
# # generate next layer mean
# if layer_id!=8:
# with open('../src/weights.h', 'a') as f:
# next_means_b0 = abs(means[layer_id][0])
# next_means_b1 = abs(means[layer_id][1])
# f.write("const ap_fixed<24, 16> " + "next_layer_means_" + layer_type + str(layer_id+1) + "[2] = {")
# f.write(str(next_means_b0))
# f.write(','+ str(next_means_b1))
# f.write('};\n')
# generate random map
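        # four input-shuffle index maps are written per layer; the LUT layer supplies four distinct rand_map_k arrays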
for j in range(4):
with open('../src/weights.h', 'a') as f:
rand_map = rand_maps[layer_id][j].flatten().astype(np.uint32)
f.write("const unsigned int " + "rand_map_" + str(j) + "_" + layer_type + str(layer_id+1) + "["+str(len(rand_map))+"] = {")
for i, ele in enumerate(rand_map):
if i == 0:
f.write(str(ele))
else:
f.write(','+ str(ele))
f.write('};\n')
# generate alpha
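        # alpha holds the activation scaling factors |layer gamma * residual mean| per residual level; the first layer has no preceding residual stage, so a single |gamma| is written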
with open('../src/weights.h', 'a') as f:
if layer_id!=0:
alpha_b0 = abs(gammas[layer_id] * means[layer_id-1][0])
alpha_b1 = abs(gammas[layer_id] * means[layer_id-1][1])
f.write("const ap_fixed<24, 16> " + "alpha_" + layer_type + str(layer_id+1) + "[2] = {")
f.write(str(alpha_b0))
f.write(','+ str(alpha_b1))
f.write('};\n')
else:
alpha_b0 = abs(gammas[layer_id])
f.write("const ap_fixed<24, 16> " + "alpha_" + layer_type + str(layer_id+1) + "[1] = {")
f.write(str(alpha_b0))
f.write('};\n')
|
py | b409fe284222fda1e56db921f9205c340b47a761 | #!/usr/bin/env python3
import json
import sys
import traceback
from mgm_logger import MgmLogger
import mgm_utils
segments = list()
# Converts AMP Transcript json to Draft JS which is used by the transcript editor.
def main():
(root_dir, from_transcript, diarization_json, to_draftjs) = sys.argv[1:5]
# using output instead of input filename as the latter is unique while the former could be used by multiple jobs
logger = MgmLogger(root_dir, "hmgm_transcript", to_draftjs)
sys.stdout = logger
sys.stderr = logger
try:
# exit to requeue here if Transcript->DraftJs conversion already done
mgm_utils.exit_if_file_generated(to_draftjs)
print("Converting from Transcript " + from_transcript + " to DraftJs: " + to_draftjs)
if diarization_json is not None and diarization_json!='None':
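            # preload the diarization segments so each word can be attributed to a speaker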
fill_speakers(diarization_json)
speaker_count = 0
out_json = dict()
out_json['entityMap'] = {}
out_json['blocks'] = []
# Open the transcribe output
with open(from_transcript) as json_file:
try:
json_input = json.load(json_file)
except ValueError as e:
raise Exception("Exception: Invalid JSON format for input file", e)
# Check to see if we have the required input
if 'results' not in json_input.keys():
raise Exception("Exception: Missing required results input. ")
ampResults = json_input['results']
if 'words' not in ampResults.keys() or 'transcript' not in ampResults.keys():
raise Exception("Exception: Missing required words or transcript input. ")
ampResultsWords = ampResults['words']
ampTranscript = ampResults['transcript']
blockWords = list() # Words in this data block
entityRanges = list() # A list of entity ranges
lastOffset = 0 # The last offset of a word we searched for
speaker_name = None
block_start = None
this_transcript = ''
# Iterate through all of the words
for w in range(0, len(ampResultsWords)):
word = ampResultsWords[w]
nextWord = None
punctuation = ""
wordText = word['text']
# Check to see if the next "word" is punctuation. If so, append it to the current word
if len(ampResultsWords) > w + 1:
nextWord = ampResultsWords[w + 1]
if nextWord['type'] == 'punctuation':
punctuation += nextWord['text']
# If the current word is actually a word, create the necessary output
if word['type'] == 'pronunciation':
# Use the current position as the key
key = w
start = word['start']
# Record the start of the block
if block_start is None:
block_start = start
# Check to see if speaker has changed
tmp_speaker_name = get_speaker_name(start, word['end'], speaker_count)
if speaker_name is None:
speaker_name = tmp_speaker_name
# If we have more than one word...
if key > 0:
# If it is a new speaker, record the words associated with the previous speaker and restart.
if tmp_speaker_name != speaker_name:
speaker_count+=1
# Create the data values necessary
data = createData(speaker_name, blockWords, block_start)
                        # Add everything as one block, since a block covers a single speaker
block = createBlock(0, data, entityRanges, this_transcript)
out_json['blocks'].append(block)
# Once we have logged a block, reset the values
blockWords = list() # Words in this data block
entityRanges = list()
block_start = start
this_transcript = ''
speaker_name = tmp_speaker_name
lastOffset = 0
# Append punctuation if there is any
textWithPunct = wordText + punctuation
# For this block, generate transcript text
this_transcript = this_transcript + " " + textWithPunct
# Create the word
# if score is present for word and score type is confidence, use the score value; otherwise default to 1.0
score_value = word['score']['scoreValue'] if 'score' in word and word['score']['type'] == 'confidence' else 1.0
newWord = {
'start': start,
'end': word['end'],
'confidence': score_value,
'index':key,
'punct': textWithPunct,
'text': wordText
}
# Create the entity range
entityRange = {
'offset': lastOffset,
'key': key,
'length': len(textWithPunct),
'start': start,
'end': newWord['end'],
'confidence': newWord['confidence'],
'text': wordText
}
# Find the offset in the paragraph, starting with the last offset
lastOffset = len(this_transcript)
# Create the entity map listing
out_json['entityMap'][key] = {
'mutability': 'MUTABLE',
'type': "WORD",
'data': entityRange
}
# Add this to the entity range
entityRanges.append(entityRange)
# Add the word
blockWords.append(newWord)
                    # If it's the last word, flush the remaining block
if w == (len(ampResultsWords) -1):
data = createData(speaker_name, blockWords, block_start)
                        # Add everything as one block, since a block covers a single speaker
block = createBlock(0, data, entityRanges, this_transcript)
out_json['blocks'].append(block)
# Write the json
write_to_draftjs(out_json, to_draftjs)
print("Successfully converted from Transcript " + from_transcript + " to DraftJs: " + to_draftjs)
# implicitly exit 0 as the current command completes
except Exception as e:
# empty out to_draftjs to tell the following HMGM task command to fail
mgm_utils.empty_file(to_draftjs)
print ("Error: Failed to convert from Transcript " + from_transcript + " to DraftJs: " + to_draftjs, e)
traceback.print_exc()
sys.stdout.flush()
exit(1)
def createBlock(depth, data, entityRanges, transcript):
return {
'depth': depth,
'data' : data,
'entityRanges': entityRanges,
'text' : transcript.strip(),
'type' : 'paragraph',
'inlineStyleRanges': []
}
def createData(speaker, words, start):
data = dict()
data['speaker'] = speaker # Generic speaker since we don't have speakers at this point
data['words'] = words
data['start'] = start
return data
def fill_speakers(diarization_json):
try:
with open(diarization_json) as diarization_json:
segmentation = json.load(diarization_json)
            # Collect the diarization segments, if any, for speaker attribution
if 'segments' in segmentation.keys():
for s in range(0, len(segmentation['segments'])):
segments.append(segmentation['segments'][s])
except ValueError as e:
raise Exception("Exception: failed to read Diarization json", e)
def get_speaker_name(start, end, speaker_count):
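    # return the diarization label covering [start, end]; fall back to a generic Speaker_N when no segment matches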
if len(segments)==0:
return "Speaker_0"
name = None
for s in range(0, len(segments)):
this_segment = segments[s]
if this_segment["start"] <= start and this_segment["end"] >= end:
if 'speakerLabel' in this_segment.keys() and this_segment['speakerLabel'] is not None:
name = this_segment['speakerLabel']
elif 'label' in this_segment.keys() and this_segment['label'] is not None:
name = this_segment['label'] + "_" + str(s)
if name is None:
name = "Speaker_" + str(speaker_count)
speaker_count += 1
return name
# Serialize schema obj and write it to output file
def write_to_draftjs(input_json, json_file):
# Serialize the segmentation object
with open(json_file, 'w') as outfile:
json.dump(input_json, outfile, default=lambda x: x.__dict__)
if __name__ == "__main__":
main() |
py | b409fe550f27d9127363b0c27ea9b6978cdbb94c | # Author: Javad Amirian
# Email: [email protected]
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
# matplotlib.use('PS')
from opentraj.toolkit.core.trajdataset import TrajDataset
def deviation_from_linear_pred(trajlets, save_plots_to):
dp_from_t0 = trajlets[:, :, :2] - np.expand_dims(trajlets[:, 0, :2], 1)
first_significant_displacement_idx = (np.linalg.norm(dp_from_t0, axis=2) > 0.25).argmax(axis=1)
first_significant_displacement = np.stack([dp_from_t0[i, first_significant_displacement_idx[i], :2]
for i in range(len(trajlets))])
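    # the initial heading is estimated from the first displacement exceeding 0.25 rather than from the very first (possibly noisy) step; cf. the commented alternatives below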
# start_thetas = np.arctan2(trajlets[:, 0, 2], trajlets[:, 0, 3]) # calculated from first velocity vector
# start_thetas = np.arctan2(trajlets[:, 2, 0] - trajlets[:, 0, 0],
# trajlets[:, 2, 1] - trajlets[:, 0, 1])
start_thetas = np.arctan2(first_significant_displacement[:, 0], first_significant_displacement[:, 1])
rot_matrices = np.stack([np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]]) for theta in start_thetas])
trajs_zero_based = trajlets[:, :, :2] - trajlets[:, 0, :2].reshape((-1, 1, 2))
trajs_aligned = np.matmul(rot_matrices, trajs_zero_based.transpose((0, 2, 1))).transpose((0, 2, 1))
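    # every zero-based trajlet is rotated so its initial heading lies along a common reference axis; deviation is then the angle of later points w.r.t. that axis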
is_nan = ~np.any(np.any(np.isnan(trajs_aligned), axis=2), axis=1)
trajs_aligned = trajs_aligned[is_nan, :, :]
keypoints = np.mean(trajs_aligned[:, :, :], axis=0)
keypoints_radius = np.linalg.norm(keypoints, axis=1)
keypoints_dev_avg = np.rad2deg(np.arctan2(keypoints[:, 0], keypoints[:, 1]))
keypoints_dev_std = np.std(np.rad2deg(np.arctan2(trajs_aligned[:, :, 0],
trajs_aligned[:, :, 1])), axis=0)
# ======== PLOT ============
fig1, ax1 = plt.subplots()
trajs_plt = ax1.plot(trajs_aligned[:, :, 1].T, trajs_aligned[:, :, 0].T, alpha=0.3, color='blue')
avg_plt = ax1.plot(keypoints[::2, 1], keypoints[::2, 0], 'o', color='red')
for ii in range(2, len(keypoints), 2):
arc_i = patches.Arc([0, 0], zorder=10,
width=keypoints_radius[ii] * 2,
height=keypoints_radius[ii] * 2,
theta1=keypoints_dev_avg[ii] - keypoints_dev_std[ii],
theta2=keypoints_dev_avg[ii] + keypoints_dev_std[ii])
ax1.add_patch(arc_i)
ax1.grid()
ax1.set_aspect('equal')
plt.title(ds_name)
plt.xlim([-1.5, 10])
plt.ylim([-4, 4])
plt.legend(handles=[trajs_plt[0], avg_plt[0]],
labels=["trajlets", "avg"], loc="lower left")
plt.savefig(os.path.join(save_plots_to, 'dev-' + ds_name + '.png'))
# plt.show()
return keypoints_dev_avg, keypoints_dev_std
def run(trajlets, output_dir):
global ds_name
# dataset_names = list(trajlets.keys())
deviation_stats = {1.6: [], 2.4: [], 4.8: []}
for ds_name in dataset_names:
dev_avg, dev_std = deviation_from_linear_pred(trajlets[ds_name], output_dir)
for t in [1.6, 2.4, 4.8]:
dt = np.diff(trajlets[ds_name][0, :, 4])[0]
time_index = int(round(t/dt))-1
deviation_stats[t].append([dev_avg[time_index], dev_std[time_index]])
deviation_stats[1.6] = np.array(deviation_stats[1.6])
deviation_stats[2.4] = np.array(deviation_stats[2.4])
deviation_stats[4.8] = np.array(deviation_stats[4.8])
fig = plt.figure(figsize=(len(dataset_names)+2, 4))
ax1 = fig.add_subplot(211)
plt.bar(np.arange(len(dataset_names)), deviation_stats[4.8][:, 0],
yerr=deviation_stats[4.8][:, 1], alpha=0.7, color='red',
error_kw=dict(ecolor='blue', lw=2, capsize=5, capthick=2))
plt.xticks([])
plt.yticks([-30, -15, 0, 15, 30], ['$-30^o$', '$-15^o$', '$0^o$', '$15^o$', '$30^o$'])
# plt.yticks([-30, 0, 30])
plt.grid(axis='y', linestyle='--')
plt.ylabel('$t=4.8s$')
ax2 = fig.add_subplot(212)
plt.bar(np.arange(len(dataset_names)), abs(deviation_stats[2.4][:, 0]),
yerr=deviation_stats[2.4][:, 1], alpha=0.7, color='red',
error_kw=dict(ecolor='blue', lw=2, capsize=5, capthick=2))
plt.xticks([])
plt.yticks([-20, -10, 0, 10, 20], ['$-20^o$', '$-10^o$', '$0^o$', '$10^o$', '$20^o$'])
plt.grid(axis='y', linestyle='--')
plt.ylabel('$t=2.4s$')
# ax3 = fig.add_subplot(313)
# plt.bar(np.arange(len(dataset_names)), abs(deviation_stats[1.6][:, 0]),
# yerr=deviation_stats[1.6][:, 1], alpha=0.5,
# error_kw=dict(ecolor='gray', lw=2, capsize=5, capthick=2))
# plt.ylabel('t=1.6s')
plt.xticks(np.arange(0, len(dataset_names), 1.0))
ax2.set_xticklabels(dataset_names)
ax2.xaxis.set_tick_params(labelsize=8)
plt.xticks(rotation=-20)
# ax1.margins(0.05)
plt.subplots_adjust(wspace=0, hspace=.10)
plt.savefig(os.path.join(output_dir, 'traj_deviation.pdf'), dpi=400, bbox_inches='tight')
plt.show()
if __name__ == "__main__":
from opentraj.toolkit.test.load_all import all_dataset_names, get_trajlets
opentraj_root = sys.argv[1]
output_dir = sys.argv[2]
# dataset_names = ['KITTI']
dataset_names = ['ETH-Univ', 'ETH-Hotel', 'UCY-Zara', 'UCY-Univ']
# dataset_names = all_dataset_names
trajlets = get_trajlets(opentraj_root, dataset_names)
run(trajlets, output_dir)
|
py | b409fe9921e2a16538035d69dd7f043509990b99 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# CDS-ILS is free software; you can redistribute it and/or modify it under
# the terms of the MIT License; see LICENSE file for more details.
"""Test documents migration."""
from cds_ils.importer.providers.cds.cds import get_helper_dict
from cds_ils.migrator.series import journal_marc21
from cds_ils.migrator.xml_to_json_dump import CDSRecordDump
from tests.migrator.xml_imports.helpers import load_json
def test_migrate_record(datadir, base_app):
"""Test migrate date."""
# [[ migrate the book ]]
with base_app.app_context():
data = load_json(datadir, 'journal.json')
dump = CDSRecordDump(data=data[0], dojson_model=journal_marc21)
dump.prepare_revisions()
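        # the last prepared revision contains the fully converted record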
res = dump.revisions[-1][1]
assert res['legacy_recid'] == 229384
assert res == {
'_created': '1992-01-21',
"_migration": {
"is_multipart": False,
"has_related": True,
"related": [
{
"related_recid": "229630",
"relation_type": "sequence",
"relation_description": None,
"sequence_order": "previous",
}
],
"record_type": "journal",
"volumes": [],
},
'mode_of_issuance': 'SERIAL', 'legacy_recid': 229384,
'agency_code': 'SzGeCERN', 'alternative_titles': [
{'type': 'ABBREVIATION', 'value': 'Br. J. Appl. Phys.'}],
'keywords': [{'value': 'Institute of Physics', 'source': 'CERN'},
{'value': 'JPD', 'source': 'CERN'},
{'value': 'JPhysD', 'source': 'CERN'}],
'publisher': 'IOP',
'note': 'Explanation of the series change: v 1 - 18 (1950-67); ser 2 v 1 - 2 (1968-69). Ser 2 subtitled: Journal of physics D',
'internal_notes': [{'value': 'Online archives purchased 2014'}],
'access_urls': [
{
'value': 'http://iopscience.iop.org/0508-3443',
'description': 'v 1 (1950) - v 18 (1967)',
'access_restriction': [
'RESTRICTED_PERPETUAL_ACCESS_BACKFILES'],
'open_access': False,
'login_required': True,
}
],
'subjects': [{'value': '53', 'scheme': 'UDC'}],
'title': 'British journal of applied physics',
'identifiers': [
{'scheme': 'ISSN', 'value': '0508-3443',
'material': 'PRINT_VERSION'}],
'languages': ['ENG'],
'series_type': 'PERIODICAL',
'physical_volumes': [
{'description': 'v 1 (1950) - v 18 (1967)',
'location': 'DE2'}]}
|
py | b409ff791ca96ff58ebcd37387e6aeb05d7e838d | import os
import time
import numpy as np
from sklearn.externals import joblib
from sklearn.ensemble import *
from train import *
from confusion_matrix import *
from Bio import SeqIO
# Train a gradient boosting classifier on the labelled sequences and predict subcellular locations for the blind set
start = time.time()
sequences, sequence_locs = load_data()
experiments, input_columns, data = generate_features_and_experiments(start, sequences, sequence_locs)
experiment_columns = experiments['Simple Features + Physicochemical + Single Amino Acids + Dipeptide']
categories = ['Cyto', 'Mito', 'Nucl', 'Secr']
indices = np.random.permutation(data.shape[0])
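# note: no train/test split here -- the classifier is fit on all (shuffled) labelled sequences and applied to the blind set below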
input = data.as_matrix(experiment_columns)
output = np.squeeze(data.as_matrix(['locations']))
model = GradientBoostingClassifier(max_depth=4)
model.fit(input[indices], output[indices])
input_file = os.path.abspath(os.path.join('data','blind.fasta'))
test_seq_no = [str(fasta_record.id) for fasta_record in SeqIO.parse(open(input_file),'fasta')]
test_sequences = [str(fasta_record.seq) for fasta_record in SeqIO.parse(open(input_file),'fasta')]
test_experiments, test_input_columns, test_data = generate_features_and_experiments(start, test_sequences, ['blind']*len(test_sequences))
test_columns = test_experiments['Simple Features + Physicochemical + Single Amino Acids + Dipeptide']
test_input = test_data.as_matrix(test_columns)
predictions = model.predict(test_input)
probabilities = model.predict_proba(test_input)
for i in range(0, len(predictions)):
print('%s %s Confidence %s%%' % (test_seq_no[i], categories[predictions[i]], '{:.0f}'.format(100*np.max(probabilities[i]))))
|
py | b40a00be615bf451b1422e6e96050a63a5d1bb65 | #!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''return all pods '''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''return all pods '''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''return all pods '''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname, selector=None):
'''return all pods '''
cmd = ['delete', resource, rname, '-n', self.namespace]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None):
'''return all pods '''
cmd = ['process', '-n', self.namespace]
if template_data:
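            # raw template content is piped to `oc process -f -` via stdin (input_data)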
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
if self.all_namespaces:
cmd.extend(['--all-namespaces'])
elif self.namespace:
cmd.extend(['-n', self.namespace])
cmd.extend(['-o', 'json'])
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node scheduable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
#pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
#pylint: disable=too-many-arguments
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['/usr/bin/oc', 'adm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
stdout, stderr = proc.communicate(input_data)
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
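# Illustrative usage sketch (not part of the original module; the call below is hypothetical):
#   oc = OpenShiftCLI('default', '/etc/origin/master/admin.kubeconfig')
#   rval = oc.openshift_cmd(['get', 'pods', '-o', 'json'], output=True)
# runs "/usr/bin/oc get pods -o json" with KUBECONFIG pointing at the given kubeconfig;
# passing oadm=True prefixes the command with "/usr/bin/oc adm" instead. The returned dict
# always carries 'returncode', 'results' and 'cmd', with stderr/stdout added on failures.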
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(content, content_type=None):
'''Turn a list of dicts with 'path' and 'data' keys into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_file(item['path'], item['data'], ftype=content_type)
files.append({'name': os.path.basename(path), 'path': path})
return files
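# Illustrative sketch (not part of the original module; the filename and data are made up):
#   Utils.create_files_from_contents([{'path': 'ca.crt', 'data': '<pem data>'}])
# writes the raw data to /tmp/ca.crt, registers it for cleanup at exit, and returns
#   [{'name': 'ca.crt', 'path': '/tmp/ca.crt'}]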
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if result.has_key('metadata') and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the contents of the resource file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.load(contents, yaml.RoundTripLoader)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not user_def.has_key(key):
if debug:
print 'User data does not have key [%s]' % key
print 'User data: %s' % user_def
return False
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
return False
if len(user_def[key]) != len(value):
if debug:
print "List lengths are not equal."
print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
print "user_def: %s" % user_def[key]
print "value: %s" % value
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
print type(values[0])
print type(values[1])
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print 'list compare returned false'
return False
elif value != user_def[key]:
if debug:
print 'value should be identical'
print value
print user_def[key]
return False
# recurse on a dictionary
elif isinstance(value, dict):
if not user_def.has_key(key):
if debug:
print "user_def does not have key [%s]" % key
return False
if not isinstance(user_def[key], dict):
if debug:
print "dict returned false: not instance of dict"
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print "keys are not equal in dict"
print api_values
print user_values
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
print result
return False
# Verify each key, value pair is the same
else:
if not user_def.has_key(key) or value != user_def[key]:
if debug:
print "value not equal; user_def does not have key"
print key
print value
if user_def.has_key(key):
print user_def[key]
return False
if debug:
print 'returning true'
return True
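# Illustrative comparison (not part of the original module; the dicts below are made up):
#   user = {'spec': {'replicas': 2}, 'metadata': {'name': 'x'}}
#   live = {'spec': {'replicas': 2}, 'metadata': {'name': 'x', 'uid': 'abc'}, 'status': {}}
#   Utils.check_def_equal(user, live)  ->  True
# because 'metadata' and 'status' are always skipped and the remaining keys/values match.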
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
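# Illustrative sketch (not part of the original module; option names are hypothetical):
#   opts = {'registry_url': {'value': 'docker.io', 'include': True},
#           'image_tag': {'value': None, 'include': True}}
#   OpenShiftCLIConfig('img', 'default', '/etc/kubeconfig', opts).stringify()
#   ->  ['--registry-url=docker.io']
# Keys with empty values are dropped and underscores become dashes; note that despite the
# docstring, stringify() returns a list of parameters rather than a single string.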
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, value):
''' setter method for separator '''
self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
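# Illustrative sketch (not part of the original module):
#   Yedit.parse_key('spec.template.spec.containers[0]')
#   ->  [('', 'spec'), ('', 'template'), ('', 'spec'), ('', 'containers'), ('0', '')]
# i.e. each element is an (array_index, dict_key) pair with the unused slot left empty.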
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add or update an item in a dictionary using key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
item = 'x'  ->  d becomes {'a': {'b': 'x'}}
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding data to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
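# Illustrative sketch (not part of the original module):
#   data = {'a': {'b': {'c': 'd'}}}
#   Yedit.get_entry(data, 'a.b.c')            ->  'd'
#   Yedit.get_entry(data, 'a#b#c', sep='#')   ->  'd'   (same path, '#' separator)
#   Yedit.get_entry(data, 'a.x')              ->  None  (missing key)
# List items use bracket indexes, e.g. 'spec.containers[0]' with the default separator.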
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
# pylint: disable=too-many-arguments
class OCImage(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
def __init__(self,
namespace,
registry_url,
image_name,
image_tag,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCImage '''
super(OCImage, self).__init__(namespace, kubeconfig)
self.namespace = namespace
self.registry_url = registry_url
self.image_name = image_name
self.image_tag = image_tag
self.kubeconfig = kubeconfig
self.verbose = verbose
def get(self):
'''return an image stream by name '''
results = self._get('imagestream', self.image_name)
results['exists'] = False
if results['returncode'] == 0 and results['results'][0]:
results['exists'] = True
if results['returncode'] != 0 and '"%s" not found' % self.image_name in results['stderr']:
results['returncode'] = 0
return results
def create(self, url=None, name=None, tag=None):
'''Create an image '''
return self._import_image(url, name, tag)
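# Illustrative sketch (not part of the original module; registry, name and tag are made up):
#   ocimage.create('registry.example.com', 'myapp', 'v1')
# in namespace 'default' ultimately runs
#   /usr/bin/oc import-image myapp:v1 --from=registry.example.com/myapp:v1 -ndefault --confirm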
# pylint: disable=too-many-branches
def main():
'''
ansible oc module for image import
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
registry_url=dict(default=None, type='str'),
image_name=dict(default=None, type='str'),
image_tag=dict(default=None, type='str'),
content_type=dict(default='raw', choices=['yaml', 'json', 'raw'], type='str'),
force=dict(default=False, type='bool'),
),
supports_check_mode=True,
)
ocimage = OCImage(module.params['namespace'],
module.params['registry_url'],
module.params['image_name'],
module.params['image_tag'],
kubeconfig=module.params['kubeconfig'],
verbose=module.params['debug'])
state = module.params['state']
api_rval = ocimage.get()
#####
# Get
#####
if state == 'list':
module.exit_json(changed=False, results=api_rval, state="list")
if not module.params['image_name']:
module.fail_json(msg='Please specify an image name when state is present.')
if state == 'present':
########
# Create
########
if not Utils.exists(api_rval['results'], module.params['image_name']):
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
api_rval = ocimage.create(module.params['registry_url'],
module.params['image_name'],
module.params['image_tag'])
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
# image exists, no change
module.exit_json(changed=False, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
if __name__ == '__main__':
from ansible.module_utils.basic import *
main()
|
py | b40a00d93becd0d4cb4093db3fb084a09dc3f9e2 | #!/usr/bin/env python
"""
Example of using PyDDL to set up the "Spare Tire" problem for partial-order planning.
A flat tire on the axle must be replaced by the spare tire stored in the trunk, and
the LeaveOvernight action models everything disappearing if the job is left undone.
"""
from __future__ import print_function
from pyddl import Domain, Problem, Action, neg, planner
class PartialOrderPlanner:
def __init__(self, planningproblem):
self.problem = planningproblem
self.initialize()
def initialize(self):
"""Initialize all variables"""
self.causal_links = []
self.start = Action('Start', preconditions=(), effects=self.problem.init_predicates)
self.finish = Action('Finish', preconditions=self.problem.goal_predicates, effects=())
self.actions = set()
self.actions.add(self.start)
self.actions.add(self.finish)
self.constraints = set()
self.constraints.add((self.start, self.finish))
self.agenda = set()
for precond in self.finish.preconditions:
self.agenda.add((precond, self.finish))
self.expanded_actions = self.problem.grounded_actions
def find_open_preconditions(self):
pass
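# Rough illustration (not part of the original example): after PartialOrderPlanner(problem())
# runs initialize(), self.actions holds only the synthetic Start and Finish actions,
# self.constraints holds the single ordering (Start, Finish), and the agenda pairs each goal
# predicate with Finish, e.g. (('at', 'spare', 'axle'), Finish) and (('at', 'flat', 'ground'), Finish),
# assuming Problem exposes the goal predicates unchanged via goal_predicates.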
def problem():
domain = Domain((
Action(
'Remove',
parameters=(
('tire', 't'),
('location', 'l')
),
preconditions=(
('at', 't', 'l'),
),
effects=(
neg(('at', 't', 'l')),
('at', 't', 'ground'),
),
),
Action(
'PutOn',
parameters=(
('tire', 't'),
),
preconditions=(
('Tire', 't'),
('at', 't', 'ground'),
neg(('at', 'flat', 'axle'))
),
effects=(
('at', 't', 'axle'),
neg(('at', 't', 'ground')),
),
),
Action(
'LeaveOvernight',
parameters=(
),
preconditions=(
),
effects=(
neg(('at', 'spare', 'ground')),
neg(('at', 'spare', 'axle')),
neg(('at', 'spare', 'trunk')),
neg(('at', 'flat', 'ground')),
neg(('at', 'flat', 'axle')),
neg(('at', 'flat', 'trunk'))
),
)
))
problem = Problem(
domain,
{
'tire': ('flat', 'spare'),
'location': ('axle', 'trunk', 'ground')
},
init=(
('Tire', 'flat'),
('Tire', 'spare'),
('at', 'flat', 'axle'),
('at', 'spare', 'trunk'),
),
goal=(
('at', 'spare', 'axle'),
('at', 'flat', 'ground'),
)
)
return problem
if __name__ == "__main__":
st = problem()
pop = PartialOrderPlanner(st)
print(pop.agenda)
print(pop.expanded_actions)
|
py | b40a0304066e11dd2da9b91539e52cea3a1caea6 | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.new_confirmed_coins_transactions_rb_data import NewConfirmedCoinsTransactionsRBData
globals()['NewConfirmedCoinsTransactionsRBData'] = NewConfirmedCoinsTransactionsRBData
from cryptoapis.model.new_confirmed_coins_transactions_rb import NewConfirmedCoinsTransactionsRB
class TestNewConfirmedCoinsTransactionsRB(unittest.TestCase):
"""NewConfirmedCoinsTransactionsRB unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNewConfirmedCoinsTransactionsRB(self):
"""Test NewConfirmedCoinsTransactionsRB"""
# FIXME: construct object with mandatory attributes with example values
# model = NewConfirmedCoinsTransactionsRB() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b40a0472651b5d6d387a5097aa6c182f7a607c1c | import random
from src.EHs.binaryCounterEH import BinaryExactWindow, BinaryCounterEH
random.seed(888)
if __name__ == "__main__":
nElems = 100000
windowLen = 10000
eps = 0.01
hist = BinaryCounterEH(windowLen, eps)
exactWindow = BinaryExactWindow(windowLen)
sumRelativeError = 0
maxRelativeError = 0
sumBucketCount = 0
maxBucketCount = 0
for eventTimestamp in range(nElems):
event = random.randint(1, 10) > 5
exactWindow.add(event)
hist.add(eventTimestamp, event)
if eventTimestamp >= windowLen:
if eventTimestamp % windowLen == 0:
exactCount = exactWindow.query()
approxCount = hist.get_estimate()
if exactCount != 0:
relativeError = abs(exactCount - approxCount) / float(exactCount)
sumRelativeError += relativeError
maxRelativeError = max(maxRelativeError, relativeError)
sumBucketCount += hist.buckets_count()
maxBucketCount = max(maxBucketCount, hist.buckets_count())
print('Estimated: ' + str(approxCount) + ', real: ' + str(exactCount))
print('Average relative error = ' + str(sumRelativeError / nElems))
print('Maximum relative error = ' + str(maxRelativeError))
print('Relative error violation = ' + str(maxRelativeError > eps))
print('Average bucket count = ' + str(sumBucketCount / (nElems - windowLen)))
print('Size relative to window = ' + str((sumBucketCount / nElems) / windowLen))
print('Maximum bucket count = ' + str(maxBucketCount))
print('Size relative to window = ' + str(maxBucketCount / windowLen)) |
py | b40a06d55209ac725dca4b574911e3c8aea632de | # Add following to menu.py:
# import sb_lensReflections_callbacks
import nuke
def sb_lensReflections_callbacks():
n = nuke.thisNode()
k = nuke.thisKnob()
if k.name() in ["selected", "xpos", "ypos"]:
return
if k.name() == "method":
noiseKnobs = ["noise_controls_text", "random_seed", "aspect_ratio", "mix", "noise_controls"]
plateKnobs = ["dirtPlate_text", "blackpoint_1", "whitepoint_1", "gamma_5", "saturation_1"]
if n["method"].value() == "generated noise":
for i in noiseKnobs:
n.knobs()[i].setVisible(True)
for i in plateKnobs:
n.knobs()[i].setVisible(False)
elif n["method"].value() == "dirt plate":
for i in noiseKnobs:
n.knobs()[i].setVisible(False)
for i in plateKnobs:
n.knobs()[i].setVisible(True)
nuke.addKnobChanged(sb_lensReflections_callbacks, nodeClass="sb_lensReflections") |
py | b40a070328b204dea840f216e086102668cc6e47 | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Parse multiple CEF formatted strings from a file"
class Input:
FILE = "file"
class Output:
CEFS = "cefs"
class ParseMultipleInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"file": {
"type": "string",
"title": "File",
"displayType": "bytes",
"description": "Parse multiple CEF formatted strings from a file",
"format": "bytes",
"order": 1
}
},
"required": [
"file"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ParseMultipleOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"cefs": {
"type": "array",
"title": "Cefs",
"description": "A list of CEF objects parsed from the file",
"items": {
"$ref": "#/definitions/cef"
},
"order": 1
}
},
"definitions": {
"cef": {
"type": "object",
"title": "cef",
"properties": {
"device_product": {
"type": "string",
"title": "Device Product",
"description": "With vendor and version, uniquely identifies the type of sending device",
"order": 3
},
"device_vendor": {
"type": "string",
"title": "Device Vendor",
"description": "With product and version, uniquely identifies the type of sending device",
"order": 2
},
"device_version": {
"type": "string",
"title": "Device Version",
"description": "With vendor and product, uniquely identifies the type of sending device",
"order": 4
},
"extension": {
"type": "object",
"title": "Extension",
"description": "JSON object of key value pairs with keys and values as defined by the ArcSight Extension Dictionary",
"order": 8
},
"name": {
"type": "string",
"title": "Name",
"description": "Represents a human-readable and understandable description of the event",
"order": 6
},
"severity": {
"type": "string",
"title": "Severity",
"description": "Reflects the importance of the event",
"enum": [
"Low",
"Medium",
"High",
"Very-High",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
],
"order": 7
},
"signature_id": {
"type": "string",
"title": "Signature Id",
"description": "Unique identifier per event-type",
"order": 5
},
"version": {
"type": "string",
"title": "Version",
"description": "Identifies the version of the CEF format",
"default": "0.1",
"order": 1
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
py | b40a0810a1c4f09d8fd0a7a39ed4c3f7d081728d | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VpnGatewayArgs', 'VpnGateway']
@pulumi.input_type
class VpnGatewayArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
bgp_settings: Optional[pulumi.Input['BgpSettingsArgs']] = None,
connections: Optional[pulumi.Input[Sequence[pulumi.Input['VpnConnectionArgs']]]] = None,
gateway_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_hub: Optional[pulumi.Input['SubResourceArgs']] = None,
vpn_gateway_scale_unit: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a VpnGateway resource.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnGateway.
:param pulumi.Input['BgpSettingsArgs'] bgp_settings: Local network gateway's BGP speaker settings.
:param pulumi.Input[Sequence[pulumi.Input['VpnConnectionArgs']]] connections: List of all vpn connections to the gateway.
:param pulumi.Input[str] gateway_name: The name of the gateway.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input['SubResourceArgs'] virtual_hub: The VirtualHub to which the gateway belongs.
:param pulumi.Input[int] vpn_gateway_scale_unit: The scale unit for this vpn gateway.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if bgp_settings is not None:
pulumi.set(__self__, "bgp_settings", bgp_settings)
if connections is not None:
pulumi.set(__self__, "connections", connections)
if gateway_name is not None:
pulumi.set(__self__, "gateway_name", gateway_name)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_hub is not None:
pulumi.set(__self__, "virtual_hub", virtual_hub)
if vpn_gateway_scale_unit is not None:
pulumi.set(__self__, "vpn_gateway_scale_unit", vpn_gateway_scale_unit)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name of the VpnGateway.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="bgpSettings")
def bgp_settings(self) -> Optional[pulumi.Input['BgpSettingsArgs']]:
"""
Local network gateway's BGP speaker settings.
"""
return pulumi.get(self, "bgp_settings")
@bgp_settings.setter
def bgp_settings(self, value: Optional[pulumi.Input['BgpSettingsArgs']]):
pulumi.set(self, "bgp_settings", value)
@property
@pulumi.getter
def connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnConnectionArgs']]]]:
"""
List of all vpn connections to the gateway.
"""
return pulumi.get(self, "connections")
@connections.setter
def connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VpnConnectionArgs']]]]):
pulumi.set(self, "connections", value)
@property
@pulumi.getter(name="gatewayName")
def gateway_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the gateway.
"""
return pulumi.get(self, "gateway_name")
@gateway_name.setter
def gateway_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "gateway_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
The VirtualHub to which the gateway belongs.
"""
return pulumi.get(self, "virtual_hub")
@virtual_hub.setter
def virtual_hub(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "virtual_hub", value)
@property
@pulumi.getter(name="vpnGatewayScaleUnit")
def vpn_gateway_scale_unit(self) -> Optional[pulumi.Input[int]]:
"""
The scale unit for this vpn gateway.
"""
return pulumi.get(self, "vpn_gateway_scale_unit")
@vpn_gateway_scale_unit.setter
def vpn_gateway_scale_unit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "vpn_gateway_scale_unit", value)
class VpnGateway(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bgp_settings: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnConnectionArgs']]]]] = None,
gateway_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_gateway_scale_unit: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
VpnGateway Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_settings: Local network gateway's BGP speaker settings.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnConnectionArgs']]]] connections: List of all vpn connections to the gateway.
:param pulumi.Input[str] gateway_name: The name of the gateway.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnGateway.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The VirtualHub to which the gateway belongs.
:param pulumi.Input[int] vpn_gateway_scale_unit: The scale unit for this vpn gateway.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VpnGatewayArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
VpnGateway Resource.
:param str resource_name: The name of the resource.
:param VpnGatewayArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VpnGatewayArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
bgp_settings: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnConnectionArgs']]]]] = None,
gateway_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_gateway_scale_unit: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VpnGatewayArgs.__new__(VpnGatewayArgs)
__props__.__dict__["bgp_settings"] = bgp_settings
__props__.__dict__["connections"] = connections
__props__.__dict__["gateway_name"] = gateway_name
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["virtual_hub"] = virtual_hub
__props__.__dict__["vpn_gateway_scale_unit"] = vpn_gateway_scale_unit
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20191201:VpnGateway"), pulumi.Alias(type_="azure-native:network:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20180401:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20180601:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20180701:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20180801:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20181001:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20181101:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20181201:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20190201:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20190401:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20190601:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20190701:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20190801:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20190901:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20191101:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20200301:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20200401:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20200501:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20200601:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20200701:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20200801:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20201101:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20210201:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20210201:VpnGateway"), pulumi.Alias(type_="azure-native:network/v20210301:VpnGateway"), pulumi.Alias(type_="azure-nextgen:network/v20210301:VpnGateway")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VpnGateway, __self__).__init__(
'azure-native:network/v20191201:VpnGateway',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnGateway':
"""
Get an existing VpnGateway resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VpnGatewayArgs.__new__(VpnGatewayArgs)
__props__.__dict__["bgp_settings"] = None
__props__.__dict__["connections"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_hub"] = None
__props__.__dict__["vpn_gateway_scale_unit"] = None
return VpnGateway(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="bgpSettings")
def bgp_settings(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:
"""
Local network gateway's BGP speaker settings.
"""
return pulumi.get(self, "bgp_settings")
@property
@pulumi.getter
def connections(self) -> pulumi.Output[Optional[Sequence['outputs.VpnConnectionResponse']]]:
"""
List of all vpn connections to the gateway.
"""
return pulumi.get(self, "connections")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the VPN gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The VirtualHub to which the gateway belongs.
"""
return pulumi.get(self, "virtual_hub")
@property
@pulumi.getter(name="vpnGatewayScaleUnit")
def vpn_gateway_scale_unit(self) -> pulumi.Output[Optional[int]]:
"""
The scale unit for this vpn gateway.
"""
return pulumi.get(self, "vpn_gateway_scale_unit")
|
py | b40a08570398c85db402719e7cf2240ad616157e | #!/usr/bin/env python2
# coding=utf-8
# ^^^^^^^^^^^^ TODO remove when supporting only Python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
target_fee = fee_per_byte * tx_size
if fee < target_fee:
raise AssertionError("Fee of %s RESQ too low! (Should be %s RESQ)"%(str(fee), str(target_fee)))
# allow the node's estimation to be at most 2 bytes off
if fee > fee_per_byte * (tx_size + 2):
raise AssertionError("Fee of %s RESQ too high! (Should be %s RESQ)"%(str(fee), str(target_fee)))
return curr_balance
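# Rough numeric check (not part of the original test): with fee_per_byte = 0.000001 RESQ
# (the value used later in run_test) and a 250-byte transaction, any fee in
# [0.000250, 0.000252] RESQ passes; anything outside that narrow band raises AssertionError.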
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print "Mining blocks..."
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 500)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
assert_equal(self.nodes[1].getbalance(), 500)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 210 RESQ from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 1000 RESQ in block rewards plus fees, but
# minus the 210 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 1000-210)
assert_equal(self.nodes[2].getbalance(), 210)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 1000)
assert_equal(self.nodes[2].getbalance("from1"), 1000-210)
# Send 100 RESQ normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 100, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('100'))
# Send 100 RESQ with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 100, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 100 RESQ
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('100')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 100 RESQ with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
#tx should be added to balance because after restarting the nodes tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
#check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label.encode('utf-8'), s.encode('utf-8')) # TODO remove encode(...) when supporting only Python3
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
'-salvagewallet',
]
for m in maintenance:
print "check " + m
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest ().main ()
|
py | b40a092de42cd1d47742ce9f5e0bf5f1fc76929c | import inspect
def track(s:str) -> str:
"""Prepends s with the filename and linenumber of call site
"""
frame = inspect.currentframe()
if frame:
prev = frame.f_back
if prev:
return f"[{prev.f_code.co_filename}:{prev.f_lineno}] {s}"
return s
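# Illustrative usage (hypothetical file path and line number):
#   # in /home/user/app.py at line 10:
#   print(track("loading config"))
#   # -> "[/home/user/app.py:10] loading config"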
# # Using outcome:
# def track(s:str) -> str:
# """Prepends s with the filename and linenumber of call site
# """
# for prevFrame in onSuccess(pureOutcome(inspect.currentframe())
# >> liftOutcome(lambda fr: fr.f_back)):
# return f"[{prevFrame.f_code.co_filename}:{prevFrame.f_lineno}] {s}"
# return s
# # Using pymaybe:
# def track(s:str) -> str:
# """Prepends s with the filename and linenumber of call site
# """
# prevFrame = maybe(inspect.currentframe()).f_back
# if prevFrame:
# return f"[{prevFrame.f_code.co_filename}:{prevFrame.f_lineno}] {s}"
# return s
|
py | b40a09413f8b62366473c29f262ace0bbb8cfb88 | """Provide unit tests for `~python_venv.env`:py:mod:."""
import os
import os.path
import random
import subprocess
import sys
import unittest
import parameterized # https://pypi.org/project/parameterized/
from python_venv import const, env
from python_venv import exceptions as exc
from python_venv import reqs
from tests.python_venv import contextmgr as ctx
from tests.python_venv import flags
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_200_PyenvEnvironment(unittest.TestCase):
def setUp(self):
self.saved_requirements = reqs.REQUIREMENTS
def tearDown(self):
reqs.REQUIREMENTS = self.saved_requirements
def test_PV_ENV_PYNV_000_instantiate_empty(self):
with self.assertRaises(TypeError) as raised:
env.PyenvEnvironment()
msg = raised.exception.args[0]
self.assertTrue(
msg.startswith("__init__() missing 1 required positional argument")
)
@parameterized.parameterized.expand(
[
("dry_run", {"dry_run": True}, "dry_run", True),
("force", {"force": True}, "force", True),
(
"message_prefix",
{"message_prefix": "dummy_message_prefix"},
"message_prefix",
"dummy_message_prefix",
),
("python", {"python": "dummy_python"}, "python", "dummy_python"),
("basename", {"basename": "dummy_basename"}, "_basename", "dummy_basename"),
("env_name", {"env_name": "dummy_env_name"}, "_env_name", "dummy_env_name"),
(
"env_prefix",
{"env_prefix": "dummy_env_prefix"},
"_env_prefix",
"dummy_env_prefix",
),
]
)
def test_PV_ENV_PYNV_002_instantiate_kwargs(self, name, kwargs, attr, value):
x = env.PyenvEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(getattr(x, attr), value)
def test_PV_ENV_PYNV_010_requirements(self):
dummy_requirements = {"dummy_req_source": ["dummy_requirement"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.PyenvEnvironment("dummy_req_scheme")
self.assertListEqual(x.requirements.requirements, [dummy_requirements])
def test_PV_ENV_PYNV_020_package_name(self):
x = env.PyenvEnvironment("dummy_req_scheme")
self.assertEqual(x.package_name, "python_venv")
@parameterized.parameterized.expand(
[
("default", None, "python-venv"),
("specified", "dummy-package", "dummy-package"),
]
)
def test_PV_ENV_PYNV_030_basename(self, name, basename, expected):
kwargs = {} if basename is None else {"basename": basename}
x = env.PyenvEnvironment("dummy_req_scheme", **kwargs)
self.assertEqual(x.basename, expected)
@parameterized.parameterized.expand(
[
("default", reqs.REQ_SCHEME_PLAIN, {}, "python-venv"),
("default_dev", reqs.REQ_SCHEME_DEV, {}, "python-venv-dev"),
("default_devplus", reqs.REQ_SCHEME_DEVPLUS, {}, "python-venv-dev"),
(
"default_prefix",
reqs.REQ_SCHEME_PLAIN,
{"env_prefix": "dummy-prefix-"},
"dummy-prefix-python-venv",
),
(
"basename",
reqs.REQ_SCHEME_PLAIN,
{"basename": "dummy-package"},
"dummy-package",
),
(
"basename_dev",
reqs.REQ_SCHEME_DEV,
{"basename": "dummy-package"},
"dummy-package-dev",
),
(
"basename_devplus",
reqs.REQ_SCHEME_DEVPLUS,
{"basename": "dummy-package"},
"dummy-package-dev",
),
(
"basename_prefix",
reqs.REQ_SCHEME_PLAIN,
{"basename": "dummy-package", "env_prefix": "dummy-prefix-"},
"dummy-prefix-dummy-package",
),
("specified", "dummy_req_scheme", {"env_name": "dummy-env"}, "dummy-env"),
(
"specified_prefix",
"dummy_req_scheme",
{"env_name": "dummy-env", "env_prefix": "dummy-prefix-"},
"dummy-env",
),
]
)
def test_PV_ENV_PYNV_040_env_name(self, name, req_scheme, kwargs, expected):
x = env.PyenvEnvironment(req_scheme, **kwargs)
self.assertEqual(x.env_name, expected)
@parameterized.parameterized.expand(
[
("default", "dummy-basename", None, None, "<ENV_DIR>"),
("specified", None, "dummy-env", None, "<ENV_DIR>"),
("with_prefix", "dummy-basename", None, "dummy-prefix", "<ENV_DIR>"),
(
"specified_with_prefix",
"dummy-basename",
"dummy-env",
"dummy-prefix",
"<ENV_DIR>",
),
]
)
def test_PV_ENV_PYNV_050_env_dir_dry_run(
self, name, basename, env_name, env_prefix, expected
):
kwargs = {}
if basename is not None:
kwargs["basename"] = basename
if env_name is not None:
kwargs["env_name"] = env_name
if env_prefix is not None:
kwargs["env_prefix"] = env_prefix
x = env.PyenvEnvironment(reqs.REQ_SCHEME_PLAIN, dry_run=True, **kwargs)
self.assertEqual(x.env_dir, expected)
@parameterized.parameterized.expand(
[
(
"default",
"dummy-basename",
None,
None,
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
(
"specified",
None,
"dummy-env",
None,
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
(
"with_prefix",
"dummy-basename",
None,
"dummy-prefix",
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
(
"specified_with_prefix",
"dummy-basename",
"dummy-env",
"dummy-prefix",
os.path.join(os.getcwd(), "<ENV_DIR>"),
),
]
)
def test_PV_ENV_PYNV_051_abs_env_dir_dry_run(
self, name, basename, env_name, env_prefix, expected
):
kwargs = {}
if basename is not None:
kwargs["basename"] = basename
if env_name is not None:
kwargs["env_name"] = env_name
if env_prefix is not None:
kwargs["env_prefix"] = env_prefix
x = env.PyenvEnvironment(reqs.REQ_SCHEME_PLAIN, dry_run=True, **kwargs)
self.assertEqual(x.abs_env_dir, expected)
@parameterized.parameterized.expand(
[
("specified", "dummy-env", "dummy-env"),
]
)
def test_PV_ENV_PYNV_060_env_description(self, name, env_name, expected):
kwargs = {} if env_name is None else {"env_name": env_name}
x = env.PyenvEnvironment("dummy_req_scheme", **kwargs)
x.env_description
self.assertTrue(x.env_description.endswith(expected))
@parameterized.parameterized.expand(
[
("dry_run_text", {}, "[DRY-RUN]"),
("create_msg", {}, "Creating pyenv environment dummy-package"),
("create_venv", {}, "+ pyenv virtualenv"),
("install_msg", {}, "Installing dummy_req_scheme requirements"),
(
"pip_install",
{},
"+ <ENV_DIR>/bin/python3 -m pip install -r dummy_requirements.txt",
),
("success", {}, "==> Done."),
]
)
def test_PV_ENV_PYNV_100_create_dry_run(self, name, kwargs, expected_text):
dummy_requirements = {const.FROM_FILES: ["dummy_requirements.txt"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.PyenvEnvironment(
"dummy_req_scheme",
dry_run=True,
basename="dummy-package",
ignore_preflight_checks=True,
**kwargs,
)
with ctx.capture(x.create) as (
status,
_stdout,
stderr,
):
self.assertTrue(expected_text in stderr)
@parameterized.parameterized.expand(
[
("dry_run_text", "[DRY-RUN]"),
("remove_msg", "Removing pyenv environment dummy-package"),
]
)
def test_PV_ENV_PYNV_200_remove_dry_run(self, name, expected_text):
x = env.PyenvEnvironment(
reqs.REQ_SCHEME_PLAIN, dry_run=True, basename="dummy-package"
)
with ctx.capture(x.remove) as (status, _stdout, stderr):
self.assertTrue(expected_text in stderr)
@parameterized.parameterized.expand(
[
("dry_run_text", "[DRY-RUN]"),
("replace_msg", "Replacing pyenv environment dummy-package"),
("remove_msg", "Removing pyenv environment dummy-package"),
("create_msg", "Creating pyenv environment dummy-package"),
("success", "==> Done."),
]
)
def test_PV_ENV_PYNV_300_replace_dry_run(self, name, expected_text):
dummy_requirements = {const.FROM_FILES: ["dummy_requirements.txt"]}
reqs.REQUIREMENTS = {"dummy_req_scheme": [dummy_requirements]}
x = env.PyenvEnvironment(
"dummy_req_scheme",
dry_run=True,
basename="dummy-package",
ignore_preflight_checks=True,
)
with ctx.capture(x.replace) as (status, _stdout, stderr):
self.assertTrue(expected_text in stderr)
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_210_PyenvCreate(unittest.TestCase):
def setUp(self):
self.env_name = None
try:
self.choices
except AttributeError:
self.choices = (
[chr(x) for x in range(ord("0"), ord("9") + 1)]
+ [chr(x) for x in range(ord("A"), ord("Z") + 1)]
+ [chr(x) for x in range(ord("a"), ord("z") + 1)]
)
# A random prefix for environment names is required,
# since pyenv virtualenv doesn't let us choose
# where to place an environment.
self.env_prefix = "".join(random.choice(self.choices) for x in range(10)) + "-"
def tearDown(self):
if self.env_name is not None:
# remove pyenv virtual environment
subprocess.call(
["pyenv", "virtualenv-delete", "-f", self.env_name],
stderr=subprocess.DEVNULL,
)
self.env_name = None
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_110_create(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
self.env_name = x.env_name
if not flags.should_suppress_output():
x.create()
else:
original_stderr = None
with ctx.capture_to_file(x.create) as (
_status,
_stdout,
stderr,
):
original_stderr = stderr
testable_stderr = original_stderr.lower()
if "error" in testable_stderr:
print(original_stderr, file=sys.stderr)
self.assertNotIn("error", testable_stderr)
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None),
("dev", reqs.REQ_SCHEME_DEV, False, None, None),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None),
]
)
def test_PV_ENV_PYNV_120_create_missing_reqs(
self, name, req_scheme, dry_run, basename, env_name
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
with ctx.project("dummy_package", dirs=dirs):
x = env.PyenvEnvironment(
req_scheme,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
self.env_name = x.env_name
with self.assertRaises(exc.MissingRequirementsError):
if not flags.should_suppress_output():
x.create()
else:
with ctx.capture_to_file(x.create) as (
_status,
_stdout,
_stderr,
):
pass
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, True),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, True),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
"dummy-env",
True,
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, "dummy-env", True),
]
)
def test_PV_ENV_PYNV_130_create_duplicate(
self, name, req_scheme, dry_run, env_name, should_raise
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
env_name=env_name,
env_prefix=env_prefix,
dry_run=False,
force=True,
)
self.env_name = x.env_name
if not flags.should_suppress_output():
x.create()
else:
with ctx.capture_to_file(x.create) as (_status, _stdout, _stderr):
pass
x = env.PyenvEnvironment(
req_scheme,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
if should_raise:
with self.assertRaises(exc.EnvExistsError):
if not flags.should_suppress_output():
x.create()
else:
with ctx.capture_to_file(x.create) as (
_status,
_stdout,
_stderr,
):
pass
else:
if not flags.should_suppress_output():
x.create()
else:
original_stderr = None
with ctx.capture_to_file(x.create) as (_status, _stdout, stderr):
original_stderr = stderr
testable_stderr = original_stderr.lower()
if "error" in testable_stderr:
print(original_stderr, file=sys.stderr)
self.assertNotIn("error", testable_stderr)
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_220_PyenvRemove(unittest.TestCase):
def setUp(self):
self.env_name = None
try:
self.choices
except AttributeError:
self.choices = (
[chr(x) for x in range(ord("0"), ord("9") + 1)]
+ [chr(x) for x in range(ord("A"), ord("Z") + 1)]
+ [chr(x) for x in range(ord("a"), ord("z") + 1)]
)
# A random prefix for environment names is required,
# since pyenv virtualenv doesn't let us choose
# where to place an environment.
self.env_prefix = "".join(random.choice(self.choices) for x in range(10)) + "-"
def tearDown(self):
if self.env_name is not None:
# remove pyenv virtual environment
subprocess.call(
["pyenv", "virtualenv-delete", "-f", self.env_name],
stderr=subprocess.DEVNULL,
)
self.env_name = None
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_210_remove(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
y = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=False,
force=True,
)
self.env_name = y.env_name
if not flags.should_suppress_output():
x.remove() # remove non-existent
y.create()
x.remove() # remove existing
else:
original_stderrs = []
with ctx.capture_to_file(x.remove) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
with ctx.capture_to_file(y.create) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
with ctx.capture_to_file(x.remove) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
testable_stderrs = [text.lower() for text in original_stderrs]
for (i, text) in enumerate(testable_stderrs):
if "error" in text:
print(original_stderrs[i], file=sys.stderr)
self.assertNotIn("error", text)
########################################
@unittest.skipUnless(flags.should_run_pyenv_tests(), flags.SKIP_PYENV_MESSAGE)
class TestEnv_230_PyenvReplace(unittest.TestCase):
def setUp(self):
self.env_name = None
try:
self.choices
except AttributeError:
self.choices = (
[chr(x) for x in range(ord("0"), ord("9") + 1)]
+ [chr(x) for x in range(ord("A"), ord("Z") + 1)]
+ [chr(x) for x in range(ord("a"), ord("z") + 1)]
)
# A random prefix for environment names is required,
# since pyenv virtualenv doesn't let us choose
# where to place an environment.
self.env_prefix = "".join(random.choice(self.choices) for x in range(10)) + "-"
def tearDown(self):
if self.env_name is not None:
# remove pyenv virtual environment
subprocess.call(
["pyenv", "virtualenv-delete", "-f", self.env_name],
stderr=subprocess.DEVNULL,
)
self.env_name = None
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_310_replace_nonexistent(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
self.env_name = x.env_name
if not flags.should_suppress_output():
x.replace()
else:
original_stderrs = []
with ctx.capture_to_file(x.replace) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
testable_stderrs = [text.lower() for text in original_stderrs]
for (i, text) in enumerate(testable_stderrs):
if "error" in text:
print(original_stderrs[i], file=sys.stderr)
self.assertNotIn("error", text)
@parameterized.parameterized.expand(
[
("plain_dry_run", reqs.REQ_SCHEME_PLAIN, True, None, None, []),
("plain", reqs.REQ_SCHEME_PLAIN, False, None, None, []),
(
"plain_dry_run_env_name",
reqs.REQ_SCHEME_PLAIN,
True,
None,
"dummy-env",
[],
),
("plain_env_name", reqs.REQ_SCHEME_PLAIN, False, None, "dummy-env", []),
("dev_dry_run", reqs.REQ_SCHEME_DEV, True, None, None, []),
("dev", reqs.REQ_SCHEME_DEV, False, None, None, []),
("devplus_dry_run", reqs.REQ_SCHEME_DEVPLUS, True, None, None, []),
("devplus", reqs.REQ_SCHEME_DEVPLUS, False, None, None, []),
("frozen_dry_run", reqs.REQ_SCHEME_FROZEN, True, None, None, []),
("frozen", reqs.REQ_SCHEME_FROZEN, False, None, None, []),
("source_dry_run", reqs.REQ_SCHEME_SOURCE, True, None, None, []),
("source", reqs.REQ_SCHEME_SOURCE, False, None, None, []),
("wheel_dry_run", reqs.REQ_SCHEME_WHEEL, True, None, None, []),
("wheel", reqs.REQ_SCHEME_WHEEL, False, None, None, []),
("package_dry_run", reqs.REQ_SCHEME_PACKAGE, True, "argcomplete", None, []),
("package", reqs.REQ_SCHEME_PACKAGE, False, "argcomplete", None, []),
("pip_dry_run", reqs.REQ_SCHEME_PIP, True, None, None, ["argcomplete"]),
("pip", reqs.REQ_SCHEME_PIP, False, None, None, ["argcomplete"]),
]
)
def test_PV_ENV_PYNV_320_replace_existing(
self, name, req_scheme, dry_run, basename, env_name, pip_args
):
env_prefix = self.env_prefix
if env_name:
env_name = env_prefix + env_name
dirs = []
filespecs = {
"requirements.txt": "argcomplete",
"requirements_dev.txt": "argcomplete",
"requirements_frozen.txt": "argcomplete == 1.12.3",
os.path.join("dev", "requirements_build.txt"): "",
os.path.join("dev", "requirements_dev.txt"): "",
os.path.join("dev", "requirements_test.txt"): "parameterized",
}
with ctx.project("dummy_package", dirs=dirs, filespecs=filespecs):
x = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=dry_run,
force=True,
)
y = env.PyenvEnvironment(
req_scheme,
pip_args=pip_args,
basename=basename,
env_name=env_name,
env_prefix=env_prefix,
dry_run=False,
force=True,
)
self.env_name = y.env_name
if not flags.should_suppress_output():
y.create()
x.replace()
else:
original_stderrs = []
with ctx.capture_to_file(y.create) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
with ctx.capture_to_file(x.replace) as (_status, _stdout, stderr):
original_stderrs.append(stderr)
testable_stderrs = [text.lower() for text in original_stderrs]
for (i, text) in enumerate(testable_stderrs):
if "error" in text:
print(original_stderrs[i], file=sys.stderr)
self.assertNotIn("error", text)
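# --- Usage note (not part of the original test module) ---
# The pyenv-backed test classes above are gated by
# flags.should_run_pyenv_tests() and flags.should_suppress_output(); the
# exact switches behind those helpers live in the tests.python_venv.flags
# module, which is not shown here. Assuming a conventional module path for
# this file, the PYNV cases could be selected with unittest's -k filter:
#
#     python -m unittest tests.python_venv.test_env -k PYNV
#
# (the module path "tests.python_venv.test_env" is an assumption).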
|