{
"source": "johnkozaris/alveolus",
"score": 2
}
#### File: alveolus/alveolus/settings.py
```python
import os
from configparser import RawConfigParser
from shutil import copy2, rmtree

SOURCE_DIR = os.path.dirname(os.path.abspath(__file__))
WORKING_DIR = os.getcwd()
DOXYGEN_OUT_DIR = 'doxygen'


def _delete_files_safe_(files):
    """Remove each file in ``files`` if it exists."""
    for file in files:
        if os.path.isfile(file):
            os.remove(file)


def _delete_dirs_safe_(dirs):
    """Remove each directory in ``dirs`` (and its contents) if it exists."""
    for directory in dirs:
        if os.path.isdir(directory):
            rmtree(directory)


class Settings:
    proj_name = "Alveolus Project"
    proj_version = "0.0.0"
    proj_src_input = "./"
    proj_exclude = ""
    proj_output = "./output"
    proj_api = "./doc_src"

    def __init__(self):
        main_conf_parser = RawConfigParser()
        main_conf_parser.optionxform = str
        try:
            if os.path.isfile(os.path.join(WORKING_DIR, 'alveolus-config.ini')):
                config_file = os.path.join(WORKING_DIR, 'alveolus-config.ini')
                print('alveolus: Using custom configuration file')
            else:
                config_file = os.path.join(SOURCE_DIR, 'alveolus-config.ini')
                print('alveolus: Using default configuration file')
            main_conf_parser.read(config_file)
            self.proj_name = main_conf_parser['PROJECT']['name']
            self.proj_version = main_conf_parser['PROJECT']['version']
            self.proj_src_input = main_conf_parser['CONFIG_DIRECTORIES']['src_code']
            self.proj_exclude = main_conf_parser['CONFIG_DIRECTORIES']['exclude']
            self.proj_output = main_conf_parser['CONFIG_DIRECTORIES']['output']
            self.proj_api = main_conf_parser['CONFIG_DIRECTORIES']['src_api']
        except Exception as e:
            print("alveolus: Failed to read .ini file - REASON: " + str(e))
            print("alveolus: Using default config values")

    def create_alveolus_config(self):
        """Create the documentation source folder if it does not already exist."""
        if os.path.isdir(self.proj_api):
            print('alveolus: Docs source folder found')
        else:
            os.mkdir(self.proj_api)

    def clean_config(self):
        """Remove all generated configuration files and output directories."""
        _delete_dirs_safe_([os.path.join(self.proj_api, "doxygen_src"), "output"])
        _delete_files_safe_(['index.rst', 'doxygen.conf', 'Makefile', 'make.bat'])

    def parse_doxy_config(self):
        """Generate doxygen.conf from the bundled template, filled in with the project settings."""
        doxy_parser = RawConfigParser()
        doxy_parser.optionxform = str
        with open(os.path.join(SOURCE_DIR, "tools/doxygen.conf.in")) as stream:
            # Doxygen config files have no section header, so prepend one for configparser.
            doxy_parser.read_string("[DEFAULT]\n" + stream.read())
        doxy_parser.set('DEFAULT', 'PROJECT_NAME', self.proj_name)
        doxy_parser.set('DEFAULT', 'PROJECT_NUMBER', self.proj_version)
        doxy_parser.set('DEFAULT', 'OUTPUT_DIRECTORY', os.path.join(self.proj_output, DOXYGEN_OUT_DIR))
        doxy_parser.set('DEFAULT', 'INPUT', self.proj_src_input)
        doxy_parser.set('DEFAULT', 'EXCLUDE', self.proj_exclude)
        with open('doxygen.conf', 'w') as doxyfile:
            doxy_parser.write(doxyfile)
        print("alveolus: Doxygen config file created")

    def parse_sphinx_config(self):
        """Generate index.rst and Makefile from their templates and copy conf.py for the Sphinx build."""
        proj_rsts = ""
        for file in os.listdir(self.proj_api):
            if file.endswith(".rst"):
                proj_rsts += " " + os.path.join(self.proj_api, file) + '\n'
        proj_rsts += " " + os.path.join(self.proj_api, "doxygen_src", "library_root")
        with open(os.path.join(SOURCE_DIR, "tools/index.rst.in"), "rt") as sphinx_index:
            with open("index.rst", "wt") as parsed_index:
                new_index = sphinx_index.read() \
                    .replace("@Title", self.proj_name) \
                    .replace("@toctree_depth", "1") \
                    .replace("@MainDescription", "")
                if proj_rsts:
                    new_index = new_index.replace("@toctree_include", proj_rsts)
                else:
                    new_index = new_index.replace("@toctree_include", " ")
                parsed_index.write(new_index)
        with open(os.path.join(SOURCE_DIR, "tools/Makefile.in"), "rt") as sphinx_makefile:
            with open("Makefile", "wt") as parsed_makefile:
                new_makefile = sphinx_makefile.read() \
                    .replace("@source", self.proj_api) \
                    .replace("@output", self.proj_output) \
                    .replace("@exhale", self.proj_api + "/doxygen_src")
                parsed_makefile.write(new_makefile)
        copy2(os.path.join(SOURCE_DIR, "conf.py"), os.path.join(WORKING_DIR, "conf.py"))
        # IF WINDOWS
        # with open(os.path.join(SOURCE_DIR, "tools/make.bat.in"), "rt") as sphinx_bat:
        #     with open("make.bat", "wt") as parsed_bat:
        #         new_bat = sphinx_bat.read() \
        #             .replace("@source", self.proj_api) \
        #             .replace("@output", self.proj_output)
        #         parsed_bat.write(new_bat)
        # print("alveolus: make.bat created")
```
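For illustration, a minimal driver sketch for the `Settings` class above. It assumes the module is importable as `alveolus.settings` and that the bundled templates under `tools/` sit next to it; the call order is an assumption, not taken from the repository.

```python
# Hypothetical usage of Settings; the import path and call order are assumptions.
from alveolus.settings import Settings

settings = Settings()                 # reads alveolus-config.ini (custom copy if present, else bundled default)
settings.create_alveolus_config()     # ensure the docs source folder exists
settings.parse_doxy_config()          # write doxygen.conf filled with project name/version/paths
settings.parse_sphinx_config()        # write index.rst and Makefile, and copy conf.py for Sphinx
# settings.clean_config()             # remove generated files and output directories when done
```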
{
"source": "johnktims/pulumi-aws",
"score": 2
}
#### File: pulumi_aws/appmesh/virtual_node.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class VirtualNode(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The ARN of the virtual node.
"""
created_date: pulumi.Output[str]
"""
The creation date of the virtual node.
"""
last_updated_date: pulumi.Output[str]
"""
The last update date of the virtual node.
"""
mesh_name: pulumi.Output[str]
"""
The name of the service mesh in which to create the virtual node.
"""
name: pulumi.Output[str]
"""
The name to use for the virtual node.
"""
spec: pulumi.Output[dict]
"""
The virtual node specification to apply.
* `backends` (`list`) - The backends to which the virtual node is expected to send outbound traffic.
* `virtualService` (`dict`) - Specifies a virtual service to use as a backend for a virtual node.
* `virtualServiceName` (`str`) - The name of the virtual service that is acting as a virtual node backend.
* `listener` (`dict`) - The listeners from which the virtual node is expected to receive inbound traffic.
* `health_check` (`dict`) - The health check information for the listener.
* `healthyThreshold` (`float`) - The number of consecutive successful health checks that must occur before declaring the listener healthy.
* `intervalMillis` (`float`) - (Required) The time period in milliseconds between each health check execution.
* `path` (`str`) - The destination path for the health check request. This is only required if the specified protocol is `http`.
* `port` (`float`) - The destination port for the health check request. This port must match the port defined in the `port_mapping` for the listener.
* `protocol` (`str`) - The protocol for the health check request. Valid values are `http` and `tcp`.
* `timeoutMillis` (`float`) - The amount of time to wait when receiving a response from the health check, in milliseconds.
* `unhealthyThreshold` (`float`) - The number of consecutive failed health checks that must occur before declaring a virtual node unhealthy.
* `portMapping` (`dict`) - The port mapping information for the listener.
* `port` (`float`) - The port used for the port mapping.
* `protocol` (`str`) - The protocol used for the port mapping. Valid values are `http` and `tcp`.
* `logging` (`dict`) - The inbound and outbound access logging information for the virtual node.
* `accessLog` (`dict`) - The access log configuration for a virtual node.
* `file` (`dict`) - The file object to send virtual node access logs to.
* `path` (`str`) - The file path to write access logs to. You can use `/dev/stdout` to send access logs to standard out.
* `serviceDiscovery` (`dict`) - The service discovery information for the virtual node.
* `awsCloudMap` (`dict`) - Specifies any AWS Cloud Map information for the virtual node.
* `attributes` (`dict`) - A string map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. Only instances that match all of the specified key/value pairs will be returned.
* `namespaceName` (`str`) - The name of the AWS Cloud Map namespace to use.
Use the [`servicediscovery.HttpNamespace`](https://www.terraform.io/docs/providers/aws/r/service_discovery_http_namespace.html) resource to configure a Cloud Map namespace.
* `service_name` (`str`) - The name of the AWS Cloud Map service to use. Use the [`servicediscovery.Service`](https://www.terraform.io/docs/providers/aws/r/service_discovery_service.html) resource to configure a Cloud Map service.
* `dns` (`dict`) - Specifies the DNS service name for the virtual node.
* `hostname` (`str`) - The DNS host name for your virtual node.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
def __init__(__self__, resource_name, opts=None, mesh_name=None, name=None, spec=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an AWS App Mesh virtual node resource.
## Breaking Changes
Because of backward incompatible API changes (read [here](https://github.com/awslabs/aws-app-mesh-examples/issues/92)), `appmesh.VirtualNode` resource definitions created with provider versions earlier than v2.3.0 will need to be modified:
* Rename the `service_name` attribute of the `dns` object to `hostname`.
* Replace the `backends` attribute of the `spec` object with one or more `backend` configuration blocks,
setting `virtual_service_name` to the name of the service.
The state associated with existing resources will automatically be migrated.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] mesh_name: The name of the service mesh in which to create the virtual node.
:param pulumi.Input[str] name: The name to use for the virtual node.
:param pulumi.Input[dict] spec: The virtual node specification to apply.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
The **spec** object supports the following:
* `backends` (`pulumi.Input[list]`) - The backends to which the virtual node is expected to send outbound traffic.
* `virtualService` (`pulumi.Input[dict]`) - Specifies a virtual service to use as a backend for a virtual node.
* `virtualServiceName` (`pulumi.Input[str]`) - The name of the virtual service that is acting as a virtual node backend.
* `listener` (`pulumi.Input[dict]`) - The listeners from which the virtual node is expected to receive inbound traffic.
* `health_check` (`pulumi.Input[dict]`) - The health check information for the listener.
* `healthyThreshold` (`pulumi.Input[float]`) - The number of consecutive successful health checks that must occur before declaring the listener healthy.
* `intervalMillis` (`pulumi.Input[float]`) - (Required) The time period in milliseconds between each health check execution.
* `path` (`pulumi.Input[str]`) - The destination path for the health check request. This is only required if the specified protocol is `http`.
* `port` (`pulumi.Input[float]`) - The destination port for the health check request. This port must match the port defined in the `port_mapping` for the listener.
* `protocol` (`pulumi.Input[str]`) - The protocol for the health check request. Valid values are `http` and `tcp`.
* `timeoutMillis` (`pulumi.Input[float]`) - The amount of time to wait when receiving a response from the health check, in milliseconds.
* `unhealthyThreshold` (`pulumi.Input[float]`) - The number of consecutive failed health checks that must occur before declaring a virtual node unhealthy.
* `portMapping` (`pulumi.Input[dict]`) - The port mapping information for the listener.
* `port` (`pulumi.Input[float]`) - The port used for the port mapping.
* `protocol` (`pulumi.Input[str]`) - The protocol used for the port mapping. Valid values are `http` and `tcp`.
* `logging` (`pulumi.Input[dict]`) - The inbound and outbound access logging information for the virtual node.
* `accessLog` (`pulumi.Input[dict]`) - The access log configuration for a virtual node.
* `file` (`pulumi.Input[dict]`) - The file object to send virtual node access logs to.
* `path` (`pulumi.Input[str]`) - The file path to write access logs to. You can use `/dev/stdout` to send access logs to standard out.
* `serviceDiscovery` (`pulumi.Input[dict]`) - The service discovery information for the virtual node.
* `awsCloudMap` (`pulumi.Input[dict]`) - Specifies any AWS Cloud Map information for the virtual node.
* `attributes` (`pulumi.Input[dict]`) - A string map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. Only instances that match all of the specified key/value pairs will be returned.
* `namespaceName` (`pulumi.Input[str]`) - The name of the AWS Cloud Map namespace to use.
Use the [`servicediscovery.HttpNamespace`](https://www.terraform.io/docs/providers/aws/r/service_discovery_http_namespace.html) resource to configure a Cloud Map namespace.
* `service_name` (`pulumi.Input[str]`) - The name of the AWS Cloud Map service to use. Use the [`servicediscovery.Service`](https://www.terraform.io/docs/providers/aws/r/service_discovery_service.html) resource to configure a Cloud Map service.
* `dns` (`pulumi.Input[dict]`) - Specifies the DNS service name for the virtual node.
* `hostname` (`pulumi.Input[str]`) - The DNS host name for your virtual node.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if mesh_name is None:
raise TypeError("Missing required property 'mesh_name'")
__props__['mesh_name'] = mesh_name
__props__['name'] = name
if spec is None:
raise TypeError("Missing required property 'spec'")
__props__['spec'] = spec
__props__['tags'] = tags
__props__['arn'] = None
__props__['created_date'] = None
__props__['last_updated_date'] = None
super(VirtualNode, __self__).__init__(
'aws:appmesh/virtualNode:VirtualNode',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, created_date=None, last_updated_date=None, mesh_name=None, name=None, spec=None, tags=None):
"""
Get an existing VirtualNode resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the virtual node.
:param pulumi.Input[str] created_date: The creation date of the virtual node.
:param pulumi.Input[str] last_updated_date: The last update date of the virtual node.
:param pulumi.Input[str] mesh_name: The name of the service mesh in which to create the virtual node.
:param pulumi.Input[str] name: The name to use for the virtual node.
:param pulumi.Input[dict] spec: The virtual node specification to apply.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
The **spec** object supports the following:
* `backends` (`pulumi.Input[list]`) - The backends to which the virtual node is expected to send outbound traffic.
* `virtualService` (`pulumi.Input[dict]`) - Specifies a virtual service to use as a backend for a virtual node.
* `virtualServiceName` (`pulumi.Input[str]`) - The name of the virtual service that is acting as a virtual node backend.
* `listener` (`pulumi.Input[dict]`) - The listeners from which the virtual node is expected to receive inbound traffic.
* `health_check` (`pulumi.Input[dict]`) - The health check information for the listener.
* `healthyThreshold` (`pulumi.Input[float]`) - The number of consecutive successful health checks that must occur before declaring the listener healthy.
* `intervalMillis` (`pulumi.Input[float]`) - (Required) The time period in milliseconds between each health check execution.
* `path` (`pulumi.Input[str]`) - The destination path for the health check request. This is only required if the specified protocol is `http`.
* `port` (`pulumi.Input[float]`) - The destination port for the health check request. This port must match the port defined in the `port_mapping` for the listener.
* `protocol` (`pulumi.Input[str]`) - The protocol for the health check request. Valid values are `http` and `tcp`.
* `timeoutMillis` (`pulumi.Input[float]`) - The amount of time to wait when receiving a response from the health check, in milliseconds.
* `unhealthyThreshold` (`pulumi.Input[float]`) - The number of consecutive failed health checks that must occur before declaring a virtual node unhealthy.
* `portMapping` (`pulumi.Input[dict]`) - The port mapping information for the listener.
* `port` (`pulumi.Input[float]`) - The port used for the port mapping.
* `protocol` (`pulumi.Input[str]`) - The protocol used for the port mapping. Valid values are `http` and `tcp`.
* `logging` (`pulumi.Input[dict]`) - The inbound and outbound access logging information for the virtual node.
* `accessLog` (`pulumi.Input[dict]`) - The access log configuration for a virtual node.
* `file` (`pulumi.Input[dict]`) - The file object to send virtual node access logs to.
* `path` (`pulumi.Input[str]`) - The file path to write access logs to. You can use `/dev/stdout` to send access logs to standard out.
* `serviceDiscovery` (`pulumi.Input[dict]`) - The service discovery information for the virtual node.
* `awsCloudMap` (`pulumi.Input[dict]`) - Specifies any AWS Cloud Map information for the virtual node.
* `attributes` (`pulumi.Input[dict]`) - A string map that contains attributes with values that you can use to filter instances by any custom attribute that you specified when you registered the instance. Only instances that match all of the specified key/value pairs will be returned.
* `namespaceName` (`pulumi.Input[str]`) - The name of the AWS Cloud Map namespace to use.
Use the [`servicediscovery.HttpNamespace`](https://www.terraform.io/docs/providers/aws/r/service_discovery_http_namespace.html) resource to configure a Cloud Map namespace.
* `service_name` (`pulumi.Input[str]`) - The name of the AWS Cloud Map service to use. Use the [`servicediscovery.Service`](https://www.terraform.io/docs/providers/aws/r/service_discovery_service.html) resource to configure a Cloud Map service.
* `dns` (`pulumi.Input[dict]`) - Specifies the DNS service name for the virtual node.
* `hostname` (`pulumi.Input[str]`) - The DNS host name for your virtual node.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["created_date"] = created_date
__props__["last_updated_date"] = last_updated_date
__props__["mesh_name"] = mesh_name
__props__["name"] = name
__props__["spec"] = spec
__props__["tags"] = tags
return VirtualNode(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
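For reference, a short sketch of how `VirtualNode` might be instantiated with a `spec` dict shaped as documented above. The mesh name, service names, and port are placeholders, not values from the source.

```python
import pulumi
import pulumi_aws as aws

# Placeholder mesh and service names; the spec layout follows the docstring above.
service_b = aws.appmesh.VirtualNode(
    "serviceb1",
    mesh_name="example-mesh",
    spec={
        "backends": [{
            "virtualService": {"virtualServiceName": "servicea.simpleapp.local"},
        }],
        "listener": {
            "portMapping": {"port": 8080, "protocol": "http"},
        },
        "serviceDiscovery": {
            "dns": {"hostname": "serviceb.simpleapp.local"},
        },
    })

pulumi.export("virtual_node_arn", service_b.arn)
```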
#### File: pulumi_aws/batch/job_definition.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class JobDefinition(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The Amazon Resource Name of the job definition.
"""
container_properties: pulumi.Output[str]
"""
A valid [container properties](http://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html)
provided as a single valid JSON document. This parameter is required if the `type` parameter is `container`.
"""
name: pulumi.Output[str]
"""
Specifies the name of the job definition.
"""
parameters: pulumi.Output[dict]
"""
Specifies the parameter substitution placeholders to set in the job definition.
"""
retry_strategy: pulumi.Output[dict]
"""
Specifies the retry strategy to use for failed jobs that are submitted with this job definition.
Maximum number of `retry_strategy` is `1`. Defined below.
* `attempts` (`float`)
"""
revision: pulumi.Output[float]
"""
The revision of the job definition.
"""
timeout: pulumi.Output[dict]
"""
Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of `timeout` is `1`. Defined below.
* `attemptDurationSeconds` (`float`)
"""
type: pulumi.Output[str]
"""
The type of job definition. Must be `container`.
"""
def __init__(__self__, resource_name, opts=None, container_properties=None, name=None, parameters=None, retry_strategy=None, timeout=None, type=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Batch Job Definition resource.
## retry_strategy
`retry_strategy` supports the following:
* `attempts` - (Optional) The number of times to move a job to the `RUNNABLE` status. You may specify between `1` and `10` attempts.
## timeout
`timeout` supports the following:
* `attempt_duration_seconds` - (Optional) The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is `60` seconds.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] container_properties: A valid [container properties](http://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html)
provided as a single valid JSON document. This parameter is required if the `type` parameter is `container`.
:param pulumi.Input[str] name: Specifies the name of the job definition.
:param pulumi.Input[dict] parameters: Specifies the parameter substitution placeholders to set in the job definition.
:param pulumi.Input[dict] retry_strategy: Specifies the retry strategy to use for failed jobs that are submitted with this job definition.
Maximum number of `retry_strategy` is `1`. Defined below.
:param pulumi.Input[dict] timeout: Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of `timeout` is `1`. Defined below.
:param pulumi.Input[str] type: The type of job definition. Must be `container`.
The **retry_strategy** object supports the following:
* `attempts` (`pulumi.Input[float]`)
The **timeout** object supports the following:
* `attemptDurationSeconds` (`pulumi.Input[float]`)
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['container_properties'] = container_properties
__props__['name'] = name
__props__['parameters'] = parameters
__props__['retry_strategy'] = retry_strategy
__props__['timeout'] = timeout
if type is None:
raise TypeError("Missing required property 'type'")
__props__['type'] = type
__props__['arn'] = None
__props__['revision'] = None
super(JobDefinition, __self__).__init__(
'aws:batch/jobDefinition:JobDefinition',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, container_properties=None, name=None, parameters=None, retry_strategy=None, revision=None, timeout=None, type=None):
"""
Get an existing JobDefinition resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name of the job definition.
:param pulumi.Input[str] container_properties: A valid [container properties](http://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html)
provided as a single valid JSON document. This parameter is required if the `type` parameter is `container`.
:param pulumi.Input[str] name: Specifies the name of the job definition.
:param pulumi.Input[dict] parameters: Specifies the parameter substitution placeholders to set in the job definition.
:param pulumi.Input[dict] retry_strategy: Specifies the retry strategy to use for failed jobs that are submitted with this job definition.
Maximum number of `retry_strategy` is `1`. Defined below.
:param pulumi.Input[float] revision: The revision of the job definition.
:param pulumi.Input[dict] timeout: Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of `timeout` is `1`. Defined below.
:param pulumi.Input[str] type: The type of job definition. Must be `container`.
The **retry_strategy** object supports the following:
* `attempts` (`pulumi.Input[float]`)
The **timeout** object supports the following:
* `attemptDurationSeconds` (`pulumi.Input[float]`)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["container_properties"] = container_properties
__props__["name"] = name
__props__["parameters"] = parameters
__props__["retry_strategy"] = retry_strategy
__props__["revision"] = revision
__props__["timeout"] = timeout
__props__["type"] = type
return JobDefinition(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
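A sketch of registering a container job definition with the class above. The container properties JSON is illustrative (field names follow the AWS Batch `RegisterJobDefinition` API), and `retry_strategy`/`timeout` use the single-block shape described in the docstring.

```python
import json
import pulumi_aws as aws

# Illustrative container properties; values are placeholders.
container_props = json.dumps({
    "command": ["echo", "hello world"],
    "image": "busybox",
    "vcpus": 1,
    "memory": 128,
})

job_def = aws.batch.JobDefinition(
    "example-job",
    type="container",                        # required, must be "container"
    container_properties=container_props,    # required when type is "container"
    retry_strategy={"attempts": 3},          # at most one retry_strategy block
    timeout={"attemptDurationSeconds": 60},  # at most one timeout block; minimum 60 seconds
)
```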
#### File: pulumi_aws/directconnect/connection.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Connection(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The ARN of the connection.
"""
aws_device: pulumi.Output[str]
"""
The Direct Connect endpoint on which the physical connection terminates.
"""
bandwidth: pulumi.Output[str]
"""
The bandwidth of the connection. Valid values for dedicated connections: 1Gbps, 10Gbps. Valid values for hosted connections: 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps and 10Gbps. Case sensitive.
"""
has_logical_redundancy: pulumi.Output[str]
"""
Indicates whether the connection supports a secondary BGP peer in the same address family (IPv4/IPv6).
"""
jumbo_frame_capable: pulumi.Output[bool]
"""
Boolean value representing if jumbo frames have been enabled for this connection.
"""
location: pulumi.Output[str]
"""
The AWS Direct Connect location where the connection is located. See [DescribeLocations](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLocations.html) for the list of AWS Direct Connect locations. Use `locationCode`.
"""
name: pulumi.Output[str]
"""
The name of the connection.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
def __init__(__self__, resource_name, opts=None, bandwidth=None, location=None, name=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Connection of Direct Connect.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] bandwidth: The bandwidth of the connection. Valid values for dedicated connections: 1Gbps, 10Gbps. Valid values for hosted connections: 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps and 10Gbps. Case sensitive.
:param pulumi.Input[str] location: The AWS Direct Connect location where the connection is located. See [DescribeLocations](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLocations.html) for the list of AWS Direct Connect locations. Use `locationCode`.
:param pulumi.Input[str] name: The name of the connection.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if bandwidth is None:
raise TypeError("Missing required property 'bandwidth'")
__props__['bandwidth'] = bandwidth
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
__props__['name'] = name
__props__['tags'] = tags
__props__['arn'] = None
__props__['aws_device'] = None
__props__['has_logical_redundancy'] = None
__props__['jumbo_frame_capable'] = None
super(Connection, __self__).__init__(
'aws:directconnect/connection:Connection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, aws_device=None, bandwidth=None, has_logical_redundancy=None, jumbo_frame_capable=None, location=None, name=None, tags=None):
"""
Get an existing Connection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the connection.
:param pulumi.Input[str] aws_device: The Direct Connect endpoint on which the physical connection terminates.
:param pulumi.Input[str] bandwidth: The bandwidth of the connection. Valid values for dedicated connections: 1Gbps, 10Gbps. Valid values for hosted connections: 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps and 10Gbps. Case sensitive.
:param pulumi.Input[str] has_logical_redundancy: Indicates whether the connection supports a secondary BGP peer in the same address family (IPv4/IPv6).
:param pulumi.Input[bool] jumbo_frame_capable: Boolean value representing if jumbo frames have been enabled for this connection.
:param pulumi.Input[str] location: The AWS Direct Connect location where the connection is located. See [DescribeLocations](https://docs.aws.amazon.com/directconnect/latest/APIReference/API_DescribeLocations.html) for the list of AWS Direct Connect locations. Use `locationCode`.
:param pulumi.Input[str] name: The name of the connection.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["aws_device"] = aws_device
__props__["bandwidth"] = bandwidth
__props__["has_logical_redundancy"] = has_logical_redundancy
__props__["jumbo_frame_capable"] = jumbo_frame_capable
__props__["location"] = location
__props__["name"] = name
__props__["tags"] = tags
return Connection(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
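A minimal sketch of creating a dedicated Direct Connect connection with the class above. The location code is a placeholder; valid codes come from the DescribeLocations API referenced in the docstring.

```python
import pulumi
import pulumi_aws as aws

# Placeholder location code; look up valid codes via DescribeLocations.
connection = aws.directconnect.Connection(
    "example",
    bandwidth="1Gbps",
    location="EqDC2",
    tags={"Side": "Primary"},
)

pulumi.export("dx_connection_arn", connection.arn)
```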
#### File: pulumi_aws/dynamodb/table.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Table(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The arn of the table
"""
attributes: pulumi.Output[list]
"""
List of nested attribute definitions. Only required for `hash_key` and `range_key` attributes. Each attribute has two properties:
* `name` (`str`) - The name of the index
* `type` (`str`) - Attribute type, which must be a scalar type: `S`, `N`, or `B` for (S)tring, (N)umber or (B)inary data
"""
billing_mode: pulumi.Output[str]
"""
Controls how you are charged for read and write throughput and how you manage capacity. The valid values are `PROVISIONED` and `PAY_PER_REQUEST`. Defaults to `PROVISIONED`.
"""
global_secondary_indexes: pulumi.Output[list]
"""
Describe a GSI for the table;
subject to the normal limits on the number of GSIs, projected
attributes, etc.
* `hash_key` (`str`) - The name of the hash key in the index; must be
defined as an attribute in the resource.
* `name` (`str`) - The name of the index
* `nonKeyAttributes` (`list`) - Only required with `INCLUDE` as a
projection type; a list of attributes to project into the index. These
do not need to be defined as attributes on the table.
* `projectionType` (`str`) - One of `ALL`, `INCLUDE` or `KEYS_ONLY`
where `ALL` projects every attribute into the index, `KEYS_ONLY`
projects just the hash and range key into the index, and `INCLUDE`
projects only the keys specified in the _non_key_attributes_
parameter.
* `range_key` (`str`) - The name of the range key; must be defined
* `read_capacity` (`float`) - The number of read units for this index. Must be set if billing_mode is set to PROVISIONED.
* `write_capacity` (`float`) - The number of write units for this index. Must be set if billing_mode is set to PROVISIONED.
"""
hash_key: pulumi.Output[str]
"""
The name of the hash key in the index; must be
defined as an attribute in the resource.
"""
local_secondary_indexes: pulumi.Output[list]
"""
Describe an LSI on the table;
these can only be allocated *at creation* so you cannot change this
definition after you have created the resource.
* `name` (`str`) - The name of the index
* `nonKeyAttributes` (`list`) - Only required with `INCLUDE` as a
projection type; a list of attributes to project into the index. These
do not need to be defined as attributes on the table.
* `projectionType` (`str`) - One of `ALL`, `INCLUDE` or `KEYS_ONLY`
where `ALL` projects every attribute into the index, `KEYS_ONLY`
projects just the hash and range key into the index, and `INCLUDE`
projects only the keys specified in the _non_key_attributes_
parameter.
* `range_key` (`str`) - The name of the range key; must be defined
"""
name: pulumi.Output[str]
"""
The name of the index
"""
point_in_time_recovery: pulumi.Output[dict]
"""
Point-in-time recovery options.
* `enabled` (`bool`) - Indicates whether point-in-time recovery is enabled (true) or disabled (false).
"""
range_key: pulumi.Output[str]
"""
The name of the range key; must be defined
"""
read_capacity: pulumi.Output[float]
"""
The number of read units for this index. Must be set if billing_mode is set to PROVISIONED.
"""
replicas: pulumi.Output[list]
"""
Configuration block(s) with [DynamoDB Global Tables V2 (version 2019.11.21)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) replication configurations. Detailed below.
* `regionName` (`str`) - Region name of the replica.
"""
server_side_encryption: pulumi.Output[dict]
"""
Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS owned Customer Master Key if this argument isn't specified.
* `enabled` (`bool`) - Indicates whether server-side encryption is enabled (true) or disabled (false).
* `kms_key_arn` (`str`) - The ARN of the CMK that should be used for the AWS KMS encryption.
This attribute should only be specified if the key is different from the default DynamoDB CMK, `alias/aws/dynamodb`.
"""
stream_arn: pulumi.Output[str]
"""
The ARN of the Table Stream. Only available when `stream_enabled = true`
"""
stream_enabled: pulumi.Output[bool]
"""
Indicates whether Streams are to be enabled (true) or disabled (false).
"""
stream_label: pulumi.Output[str]
"""
A timestamp, in ISO 8601 format, for this stream. Note that this timestamp is not
a unique identifier for the stream on its own. However, the combination of AWS customer ID,
table name and this field is guaranteed to be unique.
It can be used for creating CloudWatch Alarms. Only available when `stream_enabled = true`
"""
stream_view_type: pulumi.Output[str]
"""
When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are `KEYS_ONLY`, `NEW_IMAGE`, `OLD_IMAGE`, `NEW_AND_OLD_IMAGES`.
"""
tags: pulumi.Output[dict]
"""
A map of tags to populate on the created table.
"""
ttl: pulumi.Output[dict]
"""
Defines ttl, has two properties, and can only be specified once:
* `attributeName` (`str`) - The name of the table attribute to store the TTL timestamp in.
* `enabled` (`bool`) - Indicates whether ttl is enabled (true) or disabled (false).
"""
write_capacity: pulumi.Output[float]
"""
The number of write units for this index. Must be set if billing_mode is set to PROVISIONED.
"""
def __init__(__self__, resource_name, opts=None, attributes=None, billing_mode=None, global_secondary_indexes=None, hash_key=None, local_secondary_indexes=None, name=None, point_in_time_recovery=None, range_key=None, read_capacity=None, replicas=None, server_side_encryption=None, stream_enabled=None, stream_view_type=None, tags=None, ttl=None, write_capacity=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a DynamoDB table resource
> **Note:** It is recommended to use [`ignoreChanges`](https://www.pulumi.com/docs/intro/concepts/programming-model/#ignorechanges) for `read_capacity` and/or `write_capacity` if there's [autoscaling policy](https://www.terraform.io/docs/providers/aws/r/appautoscaling_policy.html) attached to the table.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] attributes: List of nested attribute definitions. Only required for `hash_key` and `range_key` attributes. Each attribute has two properties:
:param pulumi.Input[str] billing_mode: Controls how you are charged for read and write throughput and how you manage capacity. The valid values are `PROVISIONED` and `PAY_PER_REQUEST`. Defaults to `PROVISIONED`.
:param pulumi.Input[list] global_secondary_indexes: Describe a GSI for the table;
subject to the normal limits on the number of GSIs, projected
attributes, etc.
:param pulumi.Input[str] hash_key: The name of the hash key in the index; must be
defined as an attribute in the resource.
:param pulumi.Input[list] local_secondary_indexes: Describe an LSI on the table;
these can only be allocated *at creation* so you cannot change this
definition after you have created the resource.
:param pulumi.Input[str] name: The name of the index
:param pulumi.Input[dict] point_in_time_recovery: Point-in-time recovery options.
:param pulumi.Input[str] range_key: The name of the range key; must be defined
:param pulumi.Input[float] read_capacity: The number of read units for this index. Must be set if billing_mode is set to PROVISIONED.
:param pulumi.Input[list] replicas: Configuration block(s) with [DynamoDB Global Tables V2 (version 2019.11.21)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) replication configurations. Detailed below.
:param pulumi.Input[dict] server_side_encryption: Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS owned Customer Master Key if this argument isn't specified.
:param pulumi.Input[bool] stream_enabled: Indicates whether Streams are to be enabled (true) or disabled (false).
:param pulumi.Input[str] stream_view_type: When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are `KEYS_ONLY`, `NEW_IMAGE`, `OLD_IMAGE`, `NEW_AND_OLD_IMAGES`.
:param pulumi.Input[dict] tags: A map of tags to populate on the created table.
:param pulumi.Input[dict] ttl: Defines ttl, has two properties, and can only be specified once:
:param pulumi.Input[float] write_capacity: The number of write units for this index. Must be set if billing_mode is set to PROVISIONED.
The **attributes** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the index
* `type` (`pulumi.Input[str]`) - Attribute type, which must be a scalar type: `S`, `N`, or `B` for (S)tring, (N)umber or (B)inary data
The **global_secondary_indexes** object supports the following:
* `hash_key` (`pulumi.Input[str]`) - The name of the hash key in the index; must be
defined as an attribute in the resource.
* `name` (`pulumi.Input[str]`) - The name of the index
* `nonKeyAttributes` (`pulumi.Input[list]`) - Only required with `INCLUDE` as a
projection type; a list of attributes to project into the index. These
do not need to be defined as attributes on the table.
* `projectionType` (`pulumi.Input[str]`) - One of `ALL`, `INCLUDE` or `KEYS_ONLY`
where `ALL` projects every attribute into the index, `KEYS_ONLY`
projects just the hash and range key into the index, and `INCLUDE`
projects only the keys specified in the _non_key_attributes_
parameter.
* `range_key` (`pulumi.Input[str]`) - The name of the range key; must be defined
* `read_capacity` (`pulumi.Input[float]`) - The number of read units for this index. Must be set if billing_mode is set to PROVISIONED.
* `write_capacity` (`pulumi.Input[float]`) - The number of write units for this index. Must be set if billing_mode is set to PROVISIONED.
The **local_secondary_indexes** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the index
* `nonKeyAttributes` (`pulumi.Input[list]`) - Only required with `INCLUDE` as a
projection type; a list of attributes to project into the index. These
do not need to be defined as attributes on the table.
* `projectionType` (`pulumi.Input[str]`) - One of `ALL`, `INCLUDE` or `KEYS_ONLY`
where `ALL` projects every attribute into the index, `KEYS_ONLY`
projects just the hash and range key into the index, and `INCLUDE`
projects only the keys specified in the _non_key_attributes_
parameter.
* `range_key` (`pulumi.Input[str]`) - The name of the range key; must be defined
The **point_in_time_recovery** object supports the following:
* `enabled` (`pulumi.Input[bool]`) - Indicates whether point-in-time recovery is enabled (true) or disabled (false).
The **replicas** object supports the following:
* `regionName` (`pulumi.Input[str]`) - Region name of the replica.
The **server_side_encryption** object supports the following:
* `enabled` (`pulumi.Input[bool]`) - Indicates whether server-side encryption is enabled (true) or disabled (false).
* `kms_key_arn` (`pulumi.Input[str]`) - The ARN of the CMK that should be used for the AWS KMS encryption.
This attribute should only be specified if the key is different from the default DynamoDB CMK, `alias/aws/dynamodb`.
The **ttl** object supports the following:
* `attributeName` (`pulumi.Input[str]`) - The name of the table attribute to store the TTL timestamp in.
* `enabled` (`pulumi.Input[bool]`) - Indicates whether ttl is enabled (true) or disabled (false).
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if attributes is None:
raise TypeError("Missing required property 'attributes'")
__props__['attributes'] = attributes
__props__['billing_mode'] = billing_mode
__props__['global_secondary_indexes'] = global_secondary_indexes
if hash_key is None:
raise TypeError("Missing required property 'hash_key'")
__props__['hash_key'] = hash_key
__props__['local_secondary_indexes'] = local_secondary_indexes
__props__['name'] = name
__props__['point_in_time_recovery'] = point_in_time_recovery
__props__['range_key'] = range_key
__props__['read_capacity'] = read_capacity
__props__['replicas'] = replicas
__props__['server_side_encryption'] = server_side_encryption
__props__['stream_enabled'] = stream_enabled
__props__['stream_view_type'] = stream_view_type
__props__['tags'] = tags
__props__['ttl'] = ttl
__props__['write_capacity'] = write_capacity
__props__['arn'] = None
__props__['stream_arn'] = None
__props__['stream_label'] = None
super(Table, __self__).__init__(
'aws:dynamodb/table:Table',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, attributes=None, billing_mode=None, global_secondary_indexes=None, hash_key=None, local_secondary_indexes=None, name=None, point_in_time_recovery=None, range_key=None, read_capacity=None, replicas=None, server_side_encryption=None, stream_arn=None, stream_enabled=None, stream_label=None, stream_view_type=None, tags=None, ttl=None, write_capacity=None):
"""
Get an existing Table resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The arn of the table
:param pulumi.Input[list] attributes: List of nested attribute definitions. Only required for `hash_key` and `range_key` attributes. Each attribute has two properties:
:param pulumi.Input[str] billing_mode: Controls how you are charged for read and write throughput and how you manage capacity. The valid values are `PROVISIONED` and `PAY_PER_REQUEST`. Defaults to `PROVISIONED`.
:param pulumi.Input[list] global_secondary_indexes: Describe a GSI for the table;
subject to the normal limits on the number of GSIs, projected
attributes, etc.
:param pulumi.Input[str] hash_key: The name of the hash key in the index; must be
defined as an attribute in the resource.
:param pulumi.Input[list] local_secondary_indexes: Describe an LSI on the table;
these can only be allocated *at creation* so you cannot change this
definition after you have created the resource.
:param pulumi.Input[str] name: The name of the index
:param pulumi.Input[dict] point_in_time_recovery: Point-in-time recovery options.
:param pulumi.Input[str] range_key: The name of the range key; must be defined
:param pulumi.Input[float] read_capacity: The number of read units for this index. Must be set if billing_mode is set to PROVISIONED.
:param pulumi.Input[list] replicas: Configuration block(s) with [DynamoDB Global Tables V2 (version 2019.11.21)](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) replication configurations. Detailed below.
:param pulumi.Input[dict] server_side_encryption: Encryption at rest options. AWS DynamoDB tables are automatically encrypted at rest with an AWS owned Customer Master Key if this argument isn't specified.
:param pulumi.Input[str] stream_arn: The ARN of the Table Stream. Only available when `stream_enabled = true`
:param pulumi.Input[bool] stream_enabled: Indicates whether Streams are to be enabled (true) or disabled (false).
:param pulumi.Input[str] stream_label: A timestamp, in ISO 8601 format, for this stream. Note that this timestamp is not
a unique identifier for the stream on its own. However, the combination of AWS customer ID,
table name and this field is guaranteed to be unique.
It can be used for creating CloudWatch Alarms. Only available when `stream_enabled = true`
:param pulumi.Input[str] stream_view_type: When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are `KEYS_ONLY`, `NEW_IMAGE`, `OLD_IMAGE`, `NEW_AND_OLD_IMAGES`.
:param pulumi.Input[dict] tags: A map of tags to populate on the created table.
:param pulumi.Input[dict] ttl: Defines ttl, has two properties, and can only be specified once:
:param pulumi.Input[float] write_capacity: The number of write units for this index. Must be set if billing_mode is set to PROVISIONED.
The **attributes** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the index
* `type` (`pulumi.Input[str]`) - Attribute type, which must be a scalar type: `S`, `N`, or `B` for (S)tring, (N)umber or (B)inary data
The **global_secondary_indexes** object supports the following:
* `hash_key` (`pulumi.Input[str]`) - The name of the hash key in the index; must be
defined as an attribute in the resource.
* `name` (`pulumi.Input[str]`) - The name of the index
* `nonKeyAttributes` (`pulumi.Input[list]`) - Only required with `INCLUDE` as a
projection type; a list of attributes to project into the index. These
do not need to be defined as attributes on the table.
* `projectionType` (`pulumi.Input[str]`) - One of `ALL`, `INCLUDE` or `KEYS_ONLY`
where `ALL` projects every attribute into the index, `KEYS_ONLY`
projects just the hash and range key into the index, and `INCLUDE`
projects only the keys specified in the _non_key_attributes_
parameter.
* `range_key` (`pulumi.Input[str]`) - The name of the range key; must be defined
* `read_capacity` (`pulumi.Input[float]`) - The number of read units for this index. Must be set if billing_mode is set to PROVISIONED.
* `write_capacity` (`pulumi.Input[float]`) - The number of write units for this index. Must be set if billing_mode is set to PROVISIONED.
The **local_secondary_indexes** object supports the following:
* `name` (`pulumi.Input[str]`) - The name of the index
* `nonKeyAttributes` (`pulumi.Input[list]`) - Only required with `INCLUDE` as a
projection type; a list of attributes to project into the index. These
do not need to be defined as attributes on the table.
* `projectionType` (`pulumi.Input[str]`) - One of `ALL`, `INCLUDE` or `KEYS_ONLY`
where `ALL` projects every attribute into the index, `KEYS_ONLY`
projects just the hash and range key into the index, and `INCLUDE`
projects only the keys specified in the _non_key_attributes_
parameter.
* `range_key` (`pulumi.Input[str]`) - The name of the range key; must be defined
The **point_in_time_recovery** object supports the following:
* `enabled` (`pulumi.Input[bool]`) - Indicates whether point-in-time recovery is enabled (true) or disabled (false).
The **replicas** object supports the following:
* `regionName` (`pulumi.Input[str]`) - Region name of the replica.
The **server_side_encryption** object supports the following:
* `enabled` (`pulumi.Input[bool]`) - Indicates whether server-side encryption is enabled (true) or disabled (false).
* `kms_key_arn` (`pulumi.Input[str]`) - The ARN of the CMK that should be used for the AWS KMS encryption.
This attribute should only be specified if the key is different from the default DynamoDB CMK, `alias/aws/dynamodb`.
The **ttl** object supports the following:
* `attributeName` (`pulumi.Input[str]`) - The name of the table attribute to store the TTL timestamp in.
* `enabled` (`pulumi.Input[bool]`) - Indicates whether ttl is enabled (true) or disabled (false).
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["attributes"] = attributes
__props__["billing_mode"] = billing_mode
__props__["global_secondary_indexes"] = global_secondary_indexes
__props__["hash_key"] = hash_key
__props__["local_secondary_indexes"] = local_secondary_indexes
__props__["name"] = name
__props__["point_in_time_recovery"] = point_in_time_recovery
__props__["range_key"] = range_key
__props__["read_capacity"] = read_capacity
__props__["replicas"] = replicas
__props__["server_side_encryption"] = server_side_encryption
__props__["stream_arn"] = stream_arn
__props__["stream_enabled"] = stream_enabled
__props__["stream_label"] = stream_label
__props__["stream_view_type"] = stream_view_type
__props__["tags"] = tags
__props__["ttl"] = ttl
__props__["write_capacity"] = write_capacity
return Table(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
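A sketch of a provisioned-capacity table using the inputs documented above. Key names, capacities, and the global secondary index are placeholders; the attribute and index key names follow the documented mixed snake/camel casing.

```python
import pulumi_aws as aws

# Placeholder schema: every hash/range key (table or GSI) must appear in `attributes`.
game_scores = aws.dynamodb.Table(
    "GameScores",
    billing_mode="PROVISIONED",
    read_capacity=20,
    write_capacity=20,
    hash_key="UserId",
    range_key="GameTitle",
    attributes=[
        {"name": "UserId", "type": "S"},
        {"name": "GameTitle", "type": "S"},
        {"name": "TopScore", "type": "N"},
    ],
    global_secondary_indexes=[{
        "name": "GameTitleIndex",
        "hash_key": "GameTitle",
        "range_key": "TopScore",
        "read_capacity": 10,
        "write_capacity": 10,
        "projectionType": "INCLUDE",
        "nonKeyAttributes": ["UserId"],  # only valid with the INCLUDE projection type
    }],
    ttl={"attributeName": "TimeToExist", "enabled": False},
    tags={"Environment": "production"},
)
```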
#### File: pulumi_aws/ec2/get_instance_type_offering.py
```python
import json
import warnings

import pulumi
import pulumi.runtime
from typing import Union

from .. import utilities, tables


class GetInstanceTypeOfferingResult:
    """
    A collection of values returned by getInstanceTypeOffering.
    """
    def __init__(__self__, filters=None, id=None, instance_type=None, location_type=None, preferred_instance_types=None):
        if filters and not isinstance(filters, list):
            raise TypeError("Expected argument 'filters' to be a list")
        __self__.filters = filters
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        __self__.id = id
        """
        id is the provider-assigned unique ID for this managed resource.
        """
        if instance_type and not isinstance(instance_type, str):
            raise TypeError("Expected argument 'instance_type' to be a str")
        __self__.instance_type = instance_type
        """
        EC2 Instance Type.
        """
        if location_type and not isinstance(location_type, str):
            raise TypeError("Expected argument 'location_type' to be a str")
        __self__.location_type = location_type
        if preferred_instance_types and not isinstance(preferred_instance_types, list):
            raise TypeError("Expected argument 'preferred_instance_types' to be a list")
        __self__.preferred_instance_types = preferred_instance_types


class AwaitableGetInstanceTypeOfferingResult(GetInstanceTypeOfferingResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetInstanceTypeOfferingResult(
            filters=self.filters,
            id=self.id,
            instance_type=self.instance_type,
            location_type=self.location_type,
            preferred_instance_types=self.preferred_instance_types)


def get_instance_type_offering(filters=None, location_type=None, preferred_instance_types=None, opts=None):
    """
    Information about a single EC2 Instance Type Offering.

    :param list filters: One or more configuration blocks containing name-values filters. See the [EC2 API Reference](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstanceTypeOfferings.html) for supported filters. Detailed below.
    :param str location_type: Location type. Defaults to `region`. Valid values: `availability-zone`, `availability-zone-id`, and `region`.
    :param list preferred_instance_types: Ordered list of preferred EC2 Instance Types. The first match in this list will be returned. If no preferred matches are found and the original search returned more than one result, an error is returned.

    The **filters** object supports the following:

    * `name` (`str`) - Name of the filter. The `location` filter depends on the top-level `location_type` argument and if not specified, defaults to the current region.
    * `values` (`list`) - List of one or more values for the filter.
    """
    __args__ = dict()
    __args__['filters'] = filters
    __args__['locationType'] = location_type
    __args__['preferredInstanceTypes'] = preferred_instance_types
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = utilities.get_version()
    __ret__ = pulumi.runtime.invoke('aws:ec2/getInstanceTypeOffering:getInstanceTypeOffering', __args__, opts=opts).value

    return AwaitableGetInstanceTypeOfferingResult(
        filters=__ret__.get('filters'),
        id=__ret__.get('id'),
        instance_type=__ret__.get('instanceType'),
        location_type=__ret__.get('locationType'),
        preferred_instance_types=__ret__.get('preferredInstanceTypes'))
```
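A sketch of invoking the data source above with a filter block; the instance types are placeholders.

```python
import pulumi
import pulumi_aws as aws

# Placeholder instance types; the first preferred type that is offered is returned.
offering = aws.ec2.get_instance_type_offering(
    filters=[{
        "name": "instance-type",
        "values": ["t3.micro", "t2.micro"],
    }],
    preferred_instance_types=["t3.micro", "t2.micro"],
)

pulumi.export("instance_type", offering.instance_type)
```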
#### File: pulumi_aws/ecr/get_credentials.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetCredentialsResult:
"""
A collection of values returned by getCredentials.
"""
def __init__(__self__, authorization_token=None, expires_at=None, id=None, proxy_endpoint=None, registry_id=None):
if authorization_token and not isinstance(authorization_token, str):
raise TypeError("Expected argument 'authorization_token' to be a str")
__self__.authorization_token = authorization_token
if expires_at and not isinstance(expires_at, str):
raise TypeError("Expected argument 'expires_at' to be a str")
__self__.expires_at = expires_at
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
if proxy_endpoint and not isinstance(proxy_endpoint, str):
raise TypeError("Expected argument 'proxy_endpoint' to be a str")
__self__.proxy_endpoint = proxy_endpoint
if registry_id and not isinstance(registry_id, str):
raise TypeError("Expected argument 'registry_id' to be a str")
__self__.registry_id = registry_id
class AwaitableGetCredentialsResult(GetCredentialsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCredentialsResult(
authorization_token=self.authorization_token,
expires_at=self.expires_at,
id=self.id,
proxy_endpoint=self.proxy_endpoint,
registry_id=self.registry_id)
def get_credentials(registry_id=None, opts=None):
    """
    Use this data source to retrieve login credentials for an Amazon ECR registry, including the
    authorization token, the time at which the token expires, and the registry's proxy endpoint.
    :param str registry_id: The registry ID (the AWS account ID of the account that owns the registry) to fetch credentials for.
    """
__args__ = dict()
__args__['registryId'] = registry_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:ecr/getCredentials:getCredentials', __args__, opts=opts).value
return AwaitableGetCredentialsResult(
authorization_token=__ret__.get('authorizationToken'),
expires_at=__ret__.get('expiresAt'),
id=__ret__.get('id'),
proxy_endpoint=__ret__.get('proxyEndpoint'),
registry_id=__ret__.get('registryId'))
```
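A minimal usage sketch for the credentials data source above; the registry ID is a placeholder, and the decoding step assumes the usual base64-encoded `user:password` format of ECR authorization tokens:
```python
import base64
import pulumi_aws as aws

# Placeholder registry ID (the AWS account ID that owns the registry).
creds = aws.ecr.get_credentials(registry_id="123456789012")

# ECR authorization tokens are typically base64-encoded "user:password" pairs usable for docker login.
user, password = base64.b64decode(creds.authorization_token).decode().split(":", 1)
registry_endpoint = creds.proxy_endpoint
```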
#### File: pulumi_aws/elasticache/cluster.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Cluster(pulumi.CustomResource):
apply_immediately: pulumi.Output[bool]
"""
Specifies whether any database modifications
are applied immediately, or during the next maintenance window. Default is
    `false`. See the Amazon ElastiCache Documentation for more information.
(Available since v0.6.0)
"""
arn: pulumi.Output[str]
availability_zone: pulumi.Output[str]
"""
The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone.
"""
az_mode: pulumi.Output[str]
"""
Specifies whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. Valid values for this parameter are `single-az` or `cross-az`, default is `single-az`. If you want to choose `cross-az`, `num_cache_nodes` must be greater than `1`
"""
cache_nodes: pulumi.Output[list]
"""
List of node objects including `id`, `address`, `port` and `availability_zone`.
Referenceable e.g. as `${aws_elasticache_cluster.bar.cache_nodes.0.address}`
* `address` (`str`)
* `availability_zone` (`str`) - The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone.
* `id` (`str`)
* `port` (`float`) - The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`.
"""
cluster_address: pulumi.Output[str]
"""
(Memcached only) The DNS name of the cache cluster without the port appended.
"""
cluster_id: pulumi.Output[str]
"""
Group identifier. ElastiCache converts
this name to lowercase
"""
configuration_endpoint: pulumi.Output[str]
"""
(Memcached only) The configuration endpoint to allow host discovery.
"""
engine: pulumi.Output[str]
"""
Name of the cache engine to be used for this cache cluster.
Valid values for this parameter are `memcached` or `redis`
"""
engine_version: pulumi.Output[str]
"""
Version number of the cache engine to be used.
See [Describe Cache Engine Versions](https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-engine-versions.html)
in the AWS Documentation center for supported versions
"""
maintenance_window: pulumi.Output[str]
"""
Specifies the weekly time range for when maintenance
on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC).
The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
"""
node_type: pulumi.Output[str]
"""
The compute and memory capacity of the nodes. See
[Available Cache Node Types](https://aws.amazon.com/elasticache/details#Available_Cache_Node_Types) for
supported node types
"""
notification_topic_arn: pulumi.Output[str]
"""
An Amazon Resource Name (ARN) of an
SNS topic to send ElastiCache notifications to. Example:
`arn:aws:sns:us-east-1:012345678999:my_sns_topic`
"""
num_cache_nodes: pulumi.Output[float]
"""
The initial number of cache nodes that the
cache cluster will have. For Redis, this value must be 1. For Memcache, this
value must be between 1 and 20. If this number is reduced on subsequent runs,
the highest numbered nodes will be removed.
"""
parameter_group_name: pulumi.Output[str]
"""
Name of the parameter group to associate
with this cache cluster
"""
port: pulumi.Output[float]
"""
The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`.
"""
preferred_availability_zones: pulumi.Output[list]
"""
A list of the Availability Zones in which cache nodes are created. If you are creating your cluster in an Amazon VPC you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of `num_cache_nodes`. If you want all the nodes in the same Availability Zone, use `availability_zone` instead, or repeat the Availability Zone multiple times in the list. Default: System chosen Availability Zones. Detecting drift of existing node availability zone is not currently supported. Updating this argument by itself to migrate existing node availability zones is not currently supported and will show a perpetual difference.
"""
replication_group_id: pulumi.Output[str]
"""
The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group.
"""
security_group_ids: pulumi.Output[list]
"""
One or more VPC security groups associated
with the cache cluster
"""
security_group_names: pulumi.Output[list]
"""
List of security group
names to associate with this cache cluster
"""
snapshot_arns: pulumi.Output[list]
"""
A single-element string list containing an
Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
"""
snapshot_name: pulumi.Output[str]
"""
The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource.
"""
snapshot_retention_limit: pulumi.Output[float]
"""
The number of days for which ElastiCache will
retain automatic cache cluster snapshots before deleting them. For example, if you set
SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes
"""
snapshot_window: pulumi.Output[str]
"""
The daily time range (in UTC) during which ElastiCache will
begin taking a daily snapshot of your cache cluster. Example: 05:00-09:00
"""
subnet_group_name: pulumi.Output[str]
"""
Name of the subnet group to be used
for the cache cluster.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource
"""
def __init__(__self__, resource_name, opts=None, apply_immediately=None, availability_zone=None, az_mode=None, cluster_id=None, engine=None, engine_version=None, maintenance_window=None, node_type=None, notification_topic_arn=None, num_cache_nodes=None, parameter_group_name=None, port=None, preferred_availability_zones=None, replication_group_id=None, security_group_ids=None, security_group_names=None, snapshot_arns=None, snapshot_name=None, snapshot_retention_limit=None, snapshot_window=None, subnet_group_name=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an ElastiCache Cluster resource, which manages a Memcached cluster or Redis instance.
For working with Redis (Cluster Mode Enabled) replication groups, see the
[`elasticache.ReplicationGroup` resource](https://www.terraform.io/docs/providers/aws/r/elasticache_replication_group.html).
> **Note:** When you change an attribute, such as `node_type`, by default
it is applied in the next maintenance window. Because of this, this provider may report
a difference in its planning phase because the actual modification has not yet taken
place. You can use the `apply_immediately` flag to instruct the service to apply the
change immediately. Using `apply_immediately` can result in a brief downtime as the server reboots.
        See the AWS Docs on Modifying an ElastiCache Cache Cluster for more information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] apply_immediately: Specifies whether any database modifications
are applied immediately, or during the next maintenance window. Default is
               `false`. See the Amazon ElastiCache Documentation for more information.
(Available since v0.6.0)
:param pulumi.Input[str] availability_zone: The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone.
:param pulumi.Input[str] az_mode: Specifies whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. Valid values for this parameter are `single-az` or `cross-az`, default is `single-az`. If you want to choose `cross-az`, `num_cache_nodes` must be greater than `1`
:param pulumi.Input[str] cluster_id: Group identifier. ElastiCache converts
this name to lowercase
:param pulumi.Input[str] engine: Name of the cache engine to be used for this cache cluster.
Valid values for this parameter are `memcached` or `redis`
:param pulumi.Input[str] engine_version: Version number of the cache engine to be used.
See [Describe Cache Engine Versions](https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-engine-versions.html)
in the AWS Documentation center for supported versions
:param pulumi.Input[str] maintenance_window: Specifies the weekly time range for when maintenance
on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC).
The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
:param pulumi.Input[str] node_type: The compute and memory capacity of the nodes. See
[Available Cache Node Types](https://aws.amazon.com/elasticache/details#Available_Cache_Node_Types) for
supported node types
:param pulumi.Input[str] notification_topic_arn: An Amazon Resource Name (ARN) of an
SNS topic to send ElastiCache notifications to. Example:
`arn:aws:sns:us-east-1:012345678999:my_sns_topic`
:param pulumi.Input[float] num_cache_nodes: The initial number of cache nodes that the
cache cluster will have. For Redis, this value must be 1. For Memcache, this
value must be between 1 and 20. If this number is reduced on subsequent runs,
the highest numbered nodes will be removed.
:param pulumi.Input[str] parameter_group_name: Name of the parameter group to associate
with this cache cluster
:param pulumi.Input[float] port: The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`.
:param pulumi.Input[list] preferred_availability_zones: A list of the Availability Zones in which cache nodes are created. If you are creating your cluster in an Amazon VPC you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of `num_cache_nodes`. If you want all the nodes in the same Availability Zone, use `availability_zone` instead, or repeat the Availability Zone multiple times in the list. Default: System chosen Availability Zones. Detecting drift of existing node availability zone is not currently supported. Updating this argument by itself to migrate existing node availability zones is not currently supported and will show a perpetual difference.
:param pulumi.Input[str] replication_group_id: The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group.
:param pulumi.Input[list] security_group_ids: One or more VPC security groups associated
with the cache cluster
:param pulumi.Input[list] security_group_names: List of security group
names to associate with this cache cluster
:param pulumi.Input[list] snapshot_arns: A single-element string list containing an
Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
:param pulumi.Input[str] snapshot_name: The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource.
:param pulumi.Input[float] snapshot_retention_limit: The number of days for which ElastiCache will
retain automatic cache cluster snapshots before deleting them. For example, if you set
SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes
:param pulumi.Input[str] snapshot_window: The daily time range (in UTC) during which ElastiCache will
begin taking a daily snapshot of your cache cluster. Example: 05:00-09:00
:param pulumi.Input[str] subnet_group_name: Name of the subnet group to be used
for the cache cluster.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['apply_immediately'] = apply_immediately
__props__['availability_zone'] = availability_zone
__props__['az_mode'] = az_mode
__props__['cluster_id'] = cluster_id
__props__['engine'] = engine
__props__['engine_version'] = engine_version
__props__['maintenance_window'] = maintenance_window
__props__['node_type'] = node_type
__props__['notification_topic_arn'] = notification_topic_arn
__props__['num_cache_nodes'] = num_cache_nodes
__props__['parameter_group_name'] = parameter_group_name
__props__['port'] = port
__props__['preferred_availability_zones'] = preferred_availability_zones
__props__['replication_group_id'] = replication_group_id
__props__['security_group_ids'] = security_group_ids
__props__['security_group_names'] = security_group_names
__props__['snapshot_arns'] = snapshot_arns
__props__['snapshot_name'] = snapshot_name
__props__['snapshot_retention_limit'] = snapshot_retention_limit
__props__['snapshot_window'] = snapshot_window
__props__['subnet_group_name'] = subnet_group_name
__props__['tags'] = tags
__props__['arn'] = None
__props__['cache_nodes'] = None
__props__['cluster_address'] = None
__props__['configuration_endpoint'] = None
super(Cluster, __self__).__init__(
'aws:elasticache/cluster:Cluster',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, apply_immediately=None, arn=None, availability_zone=None, az_mode=None, cache_nodes=None, cluster_address=None, cluster_id=None, configuration_endpoint=None, engine=None, engine_version=None, maintenance_window=None, node_type=None, notification_topic_arn=None, num_cache_nodes=None, parameter_group_name=None, port=None, preferred_availability_zones=None, replication_group_id=None, security_group_ids=None, security_group_names=None, snapshot_arns=None, snapshot_name=None, snapshot_retention_limit=None, snapshot_window=None, subnet_group_name=None, tags=None):
"""
Get an existing Cluster resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] apply_immediately: Specifies whether any database modifications
are applied immediately, or during the next maintenance window. Default is
               `false`. See the Amazon ElastiCache Documentation for more information.
(Available since v0.6.0)
:param pulumi.Input[str] availability_zone: The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone.
:param pulumi.Input[str] az_mode: Specifies whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. Valid values for this parameter are `single-az` or `cross-az`, default is `single-az`. If you want to choose `cross-az`, `num_cache_nodes` must be greater than `1`
:param pulumi.Input[list] cache_nodes: List of node objects including `id`, `address`, `port` and `availability_zone`.
Referenceable e.g. as `${aws_elasticache_cluster.bar.cache_nodes.0.address}`
:param pulumi.Input[str] cluster_address: (Memcached only) The DNS name of the cache cluster without the port appended.
:param pulumi.Input[str] cluster_id: Group identifier. ElastiCache converts
this name to lowercase
:param pulumi.Input[str] configuration_endpoint: (Memcached only) The configuration endpoint to allow host discovery.
:param pulumi.Input[str] engine: Name of the cache engine to be used for this cache cluster.
Valid values for this parameter are `memcached` or `redis`
:param pulumi.Input[str] engine_version: Version number of the cache engine to be used.
See [Describe Cache Engine Versions](https://docs.aws.amazon.com/cli/latest/reference/elasticache/describe-cache-engine-versions.html)
in the AWS Documentation center for supported versions
:param pulumi.Input[str] maintenance_window: Specifies the weekly time range for when maintenance
on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC).
The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
:param pulumi.Input[str] node_type: The compute and memory capacity of the nodes. See
[Available Cache Node Types](https://aws.amazon.com/elasticache/details#Available_Cache_Node_Types) for
supported node types
:param pulumi.Input[str] notification_topic_arn: An Amazon Resource Name (ARN) of an
SNS topic to send ElastiCache notifications to. Example:
`arn:aws:sns:us-east-1:012345678999:my_sns_topic`
:param pulumi.Input[float] num_cache_nodes: The initial number of cache nodes that the
cache cluster will have. For Redis, this value must be 1. For Memcache, this
value must be between 1 and 20. If this number is reduced on subsequent runs,
the highest numbered nodes will be removed.
:param pulumi.Input[str] parameter_group_name: Name of the parameter group to associate
with this cache cluster
:param pulumi.Input[float] port: The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`.
:param pulumi.Input[list] preferred_availability_zones: A list of the Availability Zones in which cache nodes are created. If you are creating your cluster in an Amazon VPC you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group. The number of Availability Zones listed must equal the value of `num_cache_nodes`. If you want all the nodes in the same Availability Zone, use `availability_zone` instead, or repeat the Availability Zone multiple times in the list. Default: System chosen Availability Zones. Detecting drift of existing node availability zone is not currently supported. Updating this argument by itself to migrate existing node availability zones is not currently supported and will show a perpetual difference.
:param pulumi.Input[str] replication_group_id: The ID of the replication group to which this cluster should belong. If this parameter is specified, the cluster is added to the specified replication group as a read replica; otherwise, the cluster is a standalone primary that is not part of any replication group.
:param pulumi.Input[list] security_group_ids: One or more VPC security groups associated
with the cache cluster
:param pulumi.Input[list] security_group_names: List of security group
names to associate with this cache cluster
:param pulumi.Input[list] snapshot_arns: A single-element string list containing an
Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
:param pulumi.Input[str] snapshot_name: The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource.
:param pulumi.Input[float] snapshot_retention_limit: The number of days for which ElastiCache will
retain automatic cache cluster snapshots before deleting them. For example, if you set
SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes
:param pulumi.Input[str] snapshot_window: The daily time range (in UTC) during which ElastiCache will
begin taking a daily snapshot of your cache cluster. Example: 05:00-09:00
:param pulumi.Input[str] subnet_group_name: Name of the subnet group to be used
for the cache cluster.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource
The **cache_nodes** object supports the following:
* `address` (`pulumi.Input[str]`)
* `availability_zone` (`pulumi.Input[str]`) - The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone.
* `id` (`pulumi.Input[str]`)
* `port` (`pulumi.Input[float]`) - The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["apply_immediately"] = apply_immediately
__props__["arn"] = arn
__props__["availability_zone"] = availability_zone
__props__["az_mode"] = az_mode
__props__["cache_nodes"] = cache_nodes
__props__["cluster_address"] = cluster_address
__props__["cluster_id"] = cluster_id
__props__["configuration_endpoint"] = configuration_endpoint
__props__["engine"] = engine
__props__["engine_version"] = engine_version
__props__["maintenance_window"] = maintenance_window
__props__["node_type"] = node_type
__props__["notification_topic_arn"] = notification_topic_arn
__props__["num_cache_nodes"] = num_cache_nodes
__props__["parameter_group_name"] = parameter_group_name
__props__["port"] = port
__props__["preferred_availability_zones"] = preferred_availability_zones
__props__["replication_group_id"] = replication_group_id
__props__["security_group_ids"] = security_group_ids
__props__["security_group_names"] = security_group_names
__props__["snapshot_arns"] = snapshot_arns
__props__["snapshot_name"] = snapshot_name
__props__["snapshot_retention_limit"] = snapshot_retention_limit
__props__["snapshot_window"] = snapshot_window
__props__["subnet_group_name"] = subnet_group_name
__props__["tags"] = tags
return Cluster(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
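For reference, a minimal sketch of the `Cluster` resource defined above, creating a small Memcached cluster; the node type, parameter group, and node count are illustrative values, not defaults:
```python
import pulumi_aws as aws

# A two-node Memcached cluster; adjust sizes and parameter group to your engine version.
memcached = aws.elasticache.Cluster("example",
    cluster_id="cluster-example",
    engine="memcached",
    node_type="cache.t3.micro",
    num_cache_nodes=2,
    parameter_group_name="default.memcached1.5",
    port=11211)
```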
#### File: pulumi_aws/elastictranscoder/pipeline.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Pipeline(pulumi.CustomResource):
arn: pulumi.Output[str]
aws_kms_key_arn: pulumi.Output[str]
"""
The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.
"""
content_config: pulumi.Output[dict]
"""
The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below)
* `bucket` (`str`) - The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists.
* `storage_class` (`str`) - The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket.
"""
content_config_permissions: pulumi.Output[list]
"""
The permissions for the `content_config` object. (documented below)
* `accesses` (`list`) - The permission that you want to give to the AWS user that you specified in `content_config_permissions.grantee`
* `grantee` (`str`) - The AWS user or group that you want to have access to transcoded files and playlists.
* `granteeType` (`str`) - Specify the type of value that appears in the `content_config_permissions.grantee` object. Valid values are `Canonical`, `Email` or `Group`.
"""
input_bucket: pulumi.Output[str]
"""
The Amazon S3 bucket in which you saved the media files that you want to transcode and the graphics that you want to use as watermarks.
"""
name: pulumi.Output[str]
"""
The name of the pipeline. Maximum 40 characters
"""
notifications: pulumi.Output[dict]
"""
The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status. (documented below)
* `completed` (`str`) - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline.
* `error` (`str`) - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline.
* `progressing` (`str`) - The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline.
* `warning` (`str`) - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline.
"""
output_bucket: pulumi.Output[str]
"""
The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files.
"""
role: pulumi.Output[str]
"""
The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to transcode jobs for this pipeline.
"""
thumbnail_config: pulumi.Output[dict]
"""
The ThumbnailConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. (documented below)
* `bucket` (`str`) - The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files.
* `storage_class` (`str`) - The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
"""
thumbnail_config_permissions: pulumi.Output[list]
"""
The permissions for the `thumbnail_config` object. (documented below)
* `accesses` (`list`) - The permission that you want to give to the AWS user that you specified in `thumbnail_config_permissions.grantee`.
* `grantee` (`str`) - The AWS user or group that you want to have access to thumbnail files.
* `granteeType` (`str`) - Specify the type of value that appears in the `thumbnail_config_permissions.grantee` object.
"""
def __init__(__self__, resource_name, opts=None, aws_kms_key_arn=None, content_config=None, content_config_permissions=None, input_bucket=None, name=None, notifications=None, output_bucket=None, role=None, thumbnail_config=None, thumbnail_config_permissions=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an Elastic Transcoder pipeline resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] aws_kms_key_arn: The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.
:param pulumi.Input[dict] content_config: The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below)
:param pulumi.Input[list] content_config_permissions: The permissions for the `content_config` object. (documented below)
:param pulumi.Input[str] input_bucket: The Amazon S3 bucket in which you saved the media files that you want to transcode and the graphics that you want to use as watermarks.
:param pulumi.Input[str] name: The name of the pipeline. Maximum 40 characters
:param pulumi.Input[dict] notifications: The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status. (documented below)
:param pulumi.Input[str] output_bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files.
:param pulumi.Input[str] role: The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to transcode jobs for this pipeline.
:param pulumi.Input[dict] thumbnail_config: The ThumbnailConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. (documented below)
:param pulumi.Input[list] thumbnail_config_permissions: The permissions for the `thumbnail_config` object. (documented below)
The **content_config** object supports the following:
* `bucket` (`pulumi.Input[str]`) - The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists.
* `storage_class` (`pulumi.Input[str]`) - The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket.
The **content_config_permissions** object supports the following:
* `accesses` (`pulumi.Input[list]`) - The permission that you want to give to the AWS user that you specified in `content_config_permissions.grantee`
* `grantee` (`pulumi.Input[str]`) - The AWS user or group that you want to have access to transcoded files and playlists.
* `granteeType` (`pulumi.Input[str]`) - Specify the type of value that appears in the `content_config_permissions.grantee` object. Valid values are `Canonical`, `Email` or `Group`.
The **notifications** object supports the following:
* `completed` (`pulumi.Input[str]`) - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline.
* `error` (`pulumi.Input[str]`) - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline.
* `progressing` (`pulumi.Input[str]`) - The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline.
* `warning` (`pulumi.Input[str]`) - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline.
The **thumbnail_config** object supports the following:
* `bucket` (`pulumi.Input[str]`) - The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files.
* `storage_class` (`pulumi.Input[str]`) - The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
The **thumbnail_config_permissions** object supports the following:
* `accesses` (`pulumi.Input[list]`) - The permission that you want to give to the AWS user that you specified in `thumbnail_config_permissions.grantee`.
* `grantee` (`pulumi.Input[str]`) - The AWS user or group that you want to have access to thumbnail files.
* `granteeType` (`pulumi.Input[str]`) - Specify the type of value that appears in the `thumbnail_config_permissions.grantee` object.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['aws_kms_key_arn'] = aws_kms_key_arn
__props__['content_config'] = content_config
__props__['content_config_permissions'] = content_config_permissions
if input_bucket is None:
raise TypeError("Missing required property 'input_bucket'")
__props__['input_bucket'] = input_bucket
__props__['name'] = name
__props__['notifications'] = notifications
__props__['output_bucket'] = output_bucket
if role is None:
raise TypeError("Missing required property 'role'")
__props__['role'] = role
__props__['thumbnail_config'] = thumbnail_config
__props__['thumbnail_config_permissions'] = thumbnail_config_permissions
__props__['arn'] = None
super(Pipeline, __self__).__init__(
'aws:elastictranscoder/pipeline:Pipeline',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, aws_kms_key_arn=None, content_config=None, content_config_permissions=None, input_bucket=None, name=None, notifications=None, output_bucket=None, role=None, thumbnail_config=None, thumbnail_config_permissions=None):
"""
Get an existing Pipeline resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] aws_kms_key_arn: The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.
:param pulumi.Input[dict] content_config: The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below)
:param pulumi.Input[list] content_config_permissions: The permissions for the `content_config` object. (documented below)
:param pulumi.Input[str] input_bucket: The Amazon S3 bucket in which you saved the media files that you want to transcode and the graphics that you want to use as watermarks.
:param pulumi.Input[str] name: The name of the pipeline. Maximum 40 characters
:param pulumi.Input[dict] notifications: The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status. (documented below)
:param pulumi.Input[str] output_bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files.
:param pulumi.Input[str] role: The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to transcode jobs for this pipeline.
:param pulumi.Input[dict] thumbnail_config: The ThumbnailConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. (documented below)
:param pulumi.Input[list] thumbnail_config_permissions: The permissions for the `thumbnail_config` object. (documented below)
The **content_config** object supports the following:
* `bucket` (`pulumi.Input[str]`) - The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists.
* `storage_class` (`pulumi.Input[str]`) - The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket.
The **content_config_permissions** object supports the following:
* `accesses` (`pulumi.Input[list]`) - The permission that you want to give to the AWS user that you specified in `content_config_permissions.grantee`
* `grantee` (`pulumi.Input[str]`) - The AWS user or group that you want to have access to transcoded files and playlists.
* `granteeType` (`pulumi.Input[str]`) - Specify the type of value that appears in the `content_config_permissions.grantee` object. Valid values are `Canonical`, `Email` or `Group`.
The **notifications** object supports the following:
* `completed` (`pulumi.Input[str]`) - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline.
* `error` (`pulumi.Input[str]`) - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline.
* `progressing` (`pulumi.Input[str]`) - The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline.
* `warning` (`pulumi.Input[str]`) - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline.
The **thumbnail_config** object supports the following:
* `bucket` (`pulumi.Input[str]`) - The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files.
* `storage_class` (`pulumi.Input[str]`) - The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
The **thumbnail_config_permissions** object supports the following:
* `accesses` (`pulumi.Input[list]`) - The permission that you want to give to the AWS user that you specified in `thumbnail_config_permissions.grantee`.
* `grantee` (`pulumi.Input[str]`) - The AWS user or group that you want to have access to thumbnail files.
* `granteeType` (`pulumi.Input[str]`) - Specify the type of value that appears in the `thumbnail_config_permissions.grantee` object.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["aws_kms_key_arn"] = aws_kms_key_arn
__props__["content_config"] = content_config
__props__["content_config_permissions"] = content_config_permissions
__props__["input_bucket"] = input_bucket
__props__["name"] = name
__props__["notifications"] = notifications
__props__["output_bucket"] = output_bucket
__props__["role"] = role
__props__["thumbnail_config"] = thumbnail_config
__props__["thumbnail_config_permissions"] = thumbnail_config_permissions
return Pipeline(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
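A usage sketch for the `Pipeline` resource above; the bucket names and IAM role ARN are placeholders that would normally come from other resources in the same program:
```python
import pulumi_aws as aws

# Placeholder buckets and role ARN; content_config/thumbnail_config choose where outputs are stored.
pipeline = aws.elastictranscoder.Pipeline("example",
    input_bucket="example-transcoder-input",
    role="arn:aws:iam::123456789012:role/elastic-transcoder-role",
    content_config={
        "bucket": "example-transcoder-output",
        "storage_class": "Standard",
    },
    thumbnail_config={
        "bucket": "example-transcoder-thumbnails",
        "storage_class": "Standard",
    })
```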
#### File: pulumi_aws/elb/load_balancer_policy.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class LoadBalancerPolicy(pulumi.CustomResource):
load_balancer_name: pulumi.Output[str]
"""
The load balancer on which the policy is defined.
"""
policy_attributes: pulumi.Output[list]
"""
Policy attribute to apply to the policy.
* `name` (`str`)
* `value` (`str`)
"""
policy_name: pulumi.Output[str]
"""
The name of the load balancer policy.
"""
policy_type_name: pulumi.Output[str]
"""
The policy type.
"""
def __init__(__self__, resource_name, opts=None, load_balancer_name=None, policy_attributes=None, policy_name=None, policy_type_name=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a load balancer policy, which can be attached to an ELB listener or backend server.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] load_balancer_name: The load balancer on which the policy is defined.
:param pulumi.Input[list] policy_attributes: Policy attribute to apply to the policy.
:param pulumi.Input[str] policy_name: The name of the load balancer policy.
:param pulumi.Input[str] policy_type_name: The policy type.
The **policy_attributes** object supports the following:
* `name` (`pulumi.Input[str]`)
* `value` (`pulumi.Input[str]`)
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if load_balancer_name is None:
raise TypeError("Missing required property 'load_balancer_name'")
__props__['load_balancer_name'] = load_balancer_name
__props__['policy_attributes'] = policy_attributes
if policy_name is None:
raise TypeError("Missing required property 'policy_name'")
__props__['policy_name'] = policy_name
if policy_type_name is None:
raise TypeError("Missing required property 'policy_type_name'")
__props__['policy_type_name'] = policy_type_name
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="aws:elasticloadbalancing/loadBalancerPolicy:LoadBalancerPolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(LoadBalancerPolicy, __self__).__init__(
'aws:elb/loadBalancerPolicy:LoadBalancerPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, load_balancer_name=None, policy_attributes=None, policy_name=None, policy_type_name=None):
"""
Get an existing LoadBalancerPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] load_balancer_name: The load balancer on which the policy is defined.
:param pulumi.Input[list] policy_attributes: Policy attribute to apply to the policy.
:param pulumi.Input[str] policy_name: The name of the load balancer policy.
:param pulumi.Input[str] policy_type_name: The policy type.
The **policy_attributes** object supports the following:
* `name` (`pulumi.Input[str]`)
* `value` (`pulumi.Input[str]`)
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["load_balancer_name"] = load_balancer_name
__props__["policy_attributes"] = policy_attributes
__props__["policy_name"] = policy_name
__props__["policy_type_name"] = policy_type_name
return LoadBalancerPolicy(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
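A usage sketch for the `LoadBalancerPolicy` resource above, attaching an SSL negotiation policy to a Classic ELB; the load balancer name is assumed to exist and the referenced security policy is illustrative:
```python
import pulumi_aws as aws

# Assumes an existing Classic ELB named "example-elb"; the predefined security policy name is illustrative.
ssl_policy = aws.elb.LoadBalancerPolicy("example",
    load_balancer_name="example-elb",
    policy_name="example-ssl-negotiation-policy",
    policy_type_name="SSLNegotiationPolicyType",
    policy_attributes=[{
        "name": "Reference-Security-Policy",
        "value": "ELBSecurityPolicy-TLS-1-2-2017-01",
    }])
```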
#### File: pulumi_aws/elb/load_balancer.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class LoadBalancer(pulumi.CustomResource):
access_logs: pulumi.Output[dict]
"""
An Access Logs block. Access Logs documented below.
* `bucket` (`str`) - The S3 bucket name to store the logs in.
* `bucket_prefix` (`str`) - The S3 bucket prefix. Logs are stored in the root if not configured.
* `enabled` (`bool`) - Boolean to enable / disable `access_logs`. Default is `true`
* `interval` (`float`) - The publishing interval in minutes. Default: 60 minutes.
"""
arn: pulumi.Output[str]
"""
The ARN of the ELB
"""
availability_zones: pulumi.Output[list]
"""
The AZ's to serve traffic in.
"""
connection_draining: pulumi.Output[bool]
"""
Boolean to enable connection draining. Default: `false`
"""
connection_draining_timeout: pulumi.Output[float]
"""
The time in seconds to allow for connections to drain. Default: `300`
"""
cross_zone_load_balancing: pulumi.Output[bool]
"""
Enable cross-zone load balancing. Default: `true`
"""
dns_name: pulumi.Output[str]
"""
The DNS name of the ELB
"""
health_check: pulumi.Output[dict]
"""
A health_check block. Health Check documented below.
* `healthyThreshold` (`float`) - The number of checks before the instance is declared healthy.
* `interval` (`float`) - The interval between checks.
* `target` (`str`) - The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL
values are:
* `HTTP`, `HTTPS` - PORT and PATH are required
* `TCP`, `SSL` - PORT is required, PATH is not supported
* `timeout` (`float`) - The length of time before the check times out.
* `unhealthyThreshold` (`float`) - The number of checks before the instance is declared unhealthy.
"""
idle_timeout: pulumi.Output[float]
"""
The time in seconds that the connection is allowed to be idle. Default: `60`
"""
instances: pulumi.Output[list]
"""
A list of instance ids to place in the ELB pool.
"""
internal: pulumi.Output[bool]
"""
If true, ELB will be an internal ELB.
"""
listeners: pulumi.Output[list]
"""
A list of listener blocks. Listeners documented below.
* `instance_port` (`float`) - The port on the instance to route to
* `instanceProtocol` (`str`) - The protocol to use to the instance. Valid
values are `HTTP`, `HTTPS`, `TCP`, or `SSL`
* `lb_port` (`float`) - The port to listen on for the load balancer
* `lbProtocol` (`str`) - The protocol to listen on. Valid values are `HTTP`,
`HTTPS`, `TCP`, or `SSL`
* `sslCertificateId` (`str`) - The ARN of an SSL certificate you have
uploaded to AWS IAM. **Note ECDSA-specific restrictions below. Only valid when `lb_protocol` is either HTTPS or SSL**
"""
name: pulumi.Output[str]
"""
The name of the ELB. By default generated by this provider.
"""
name_prefix: pulumi.Output[str]
"""
Creates a unique name beginning with the specified
prefix. Conflicts with `name`.
"""
security_groups: pulumi.Output[list]
"""
A list of security group IDs to assign to the ELB.
Only valid if creating an ELB within a VPC
"""
source_security_group: pulumi.Output[str]
"""
The name of the security group that you can use as
part of your inbound rules for your load balancer's back-end application
instances. Use this for Classic or Default VPC only.
"""
source_security_group_id: pulumi.Output[str]
"""
The ID of the security group that you can use as
part of your inbound rules for your load balancer's back-end application
instances. Only available on ELBs launched in a VPC.
"""
subnets: pulumi.Output[list]
"""
A list of subnet IDs to attach to the ELB.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
zone_id: pulumi.Output[str]
"""
The canonical hosted zone ID of the ELB (to be used in a Route 53 Alias record)
"""
def __init__(__self__, resource_name, opts=None, access_logs=None, availability_zones=None, connection_draining=None, connection_draining_timeout=None, cross_zone_load_balancing=None, health_check=None, idle_timeout=None, instances=None, internal=None, listeners=None, name=None, name_prefix=None, security_groups=None, source_security_group=None, subnets=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an Elastic Load Balancer resource, also known as a "Classic
Load Balancer" after the release of
[Application/Network Load Balancers](https://www.terraform.io/docs/providers/aws/r/lb.html).
> **NOTE on ELB Instances and ELB Attachments:** This provider currently
provides both a standalone ELB Attachment resource
(describing an instance attached to an ELB), and an ELB resource with
`instances` defined in-line. At this time you cannot use an ELB with in-line
        instances in conjunction with an ELB Attachment resource. Doing so will cause a
conflict and will overwrite attachments.
## Note on ECDSA Key Algorithm
        If the `ssl_certificate_id` ARN points to a
certificate that was signed by an ECDSA key, note that ELB only supports the
P256 and P384 curves. Using a certificate signed by a key using a different
curve could produce the error `ERR_SSL_VERSION_OR_CIPHER_MISMATCH` in your
browser.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] access_logs: An Access Logs block. Access Logs documented below.
:param pulumi.Input[list] availability_zones: The AZ's to serve traffic in.
:param pulumi.Input[bool] connection_draining: Boolean to enable connection draining. Default: `false`
:param pulumi.Input[float] connection_draining_timeout: The time in seconds to allow for connections to drain. Default: `300`
:param pulumi.Input[bool] cross_zone_load_balancing: Enable cross-zone load balancing. Default: `true`
:param pulumi.Input[dict] health_check: A health_check block. Health Check documented below.
:param pulumi.Input[float] idle_timeout: The time in seconds that the connection is allowed to be idle. Default: `60`
:param pulumi.Input[list] instances: A list of instance ids to place in the ELB pool.
:param pulumi.Input[bool] internal: If true, ELB will be an internal ELB.
:param pulumi.Input[list] listeners: A list of listener blocks. Listeners documented below.
:param pulumi.Input[str] name: The name of the ELB. By default generated by this provider.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified
prefix. Conflicts with `name`.
:param pulumi.Input[list] security_groups: A list of security group IDs to assign to the ELB.
Only valid if creating an ELB within a VPC
:param pulumi.Input[str] source_security_group: The name of the security group that you can use as
part of your inbound rules for your load balancer's back-end application
instances. Use this for Classic or Default VPC only.
:param pulumi.Input[list] subnets: A list of subnet IDs to attach to the ELB.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
The **access_logs** object supports the following:
* `bucket` (`pulumi.Input[str]`) - The S3 bucket name to store the logs in.
* `bucket_prefix` (`pulumi.Input[str]`) - The S3 bucket prefix. Logs are stored in the root if not configured.
* `enabled` (`pulumi.Input[bool]`) - Boolean to enable / disable `access_logs`. Default is `true`
* `interval` (`pulumi.Input[float]`) - The publishing interval in minutes. Default: 60 minutes.
The **health_check** object supports the following:
* `healthyThreshold` (`pulumi.Input[float]`) - The number of checks before the instance is declared healthy.
* `interval` (`pulumi.Input[float]`) - The interval between checks.
* `target` (`pulumi.Input[str]`) - The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL
values are:
* `HTTP`, `HTTPS` - PORT and PATH are required
* `TCP`, `SSL` - PORT is required, PATH is not supported
* `timeout` (`pulumi.Input[float]`) - The length of time before the check times out.
* `unhealthyThreshold` (`pulumi.Input[float]`) - The number of checks before the instance is declared unhealthy.
The **listeners** object supports the following:
* `instance_port` (`pulumi.Input[float]`) - The port on the instance to route to
* `instanceProtocol` (`pulumi.Input[str]`) - The protocol to use to the instance. Valid
values are `HTTP`, `HTTPS`, `TCP`, or `SSL`
* `lb_port` (`pulumi.Input[float]`) - The port to listen on for the load balancer
* `lbProtocol` (`pulumi.Input[str]`) - The protocol to listen on. Valid values are `HTTP`,
`HTTPS`, `TCP`, or `SSL`
* `sslCertificateId` (`pulumi.Input[str]`) - The ARN of an SSL certificate you have
uploaded to AWS IAM. **Note ECDSA-specific restrictions below. Only valid when `lb_protocol` is either HTTPS or SSL**
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['access_logs'] = access_logs
__props__['availability_zones'] = availability_zones
__props__['connection_draining'] = connection_draining
__props__['connection_draining_timeout'] = connection_draining_timeout
__props__['cross_zone_load_balancing'] = cross_zone_load_balancing
__props__['health_check'] = health_check
__props__['idle_timeout'] = idle_timeout
__props__['instances'] = instances
__props__['internal'] = internal
if listeners is None:
raise TypeError("Missing required property 'listeners'")
__props__['listeners'] = listeners
__props__['name'] = name
__props__['name_prefix'] = name_prefix
__props__['security_groups'] = security_groups
__props__['source_security_group'] = source_security_group
__props__['subnets'] = subnets
__props__['tags'] = tags
__props__['arn'] = None
__props__['dns_name'] = None
__props__['source_security_group_id'] = None
__props__['zone_id'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="aws:elasticloadbalancing/loadBalancer:LoadBalancer")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(LoadBalancer, __self__).__init__(
'aws:elb/loadBalancer:LoadBalancer',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, access_logs=None, arn=None, availability_zones=None, connection_draining=None, connection_draining_timeout=None, cross_zone_load_balancing=None, dns_name=None, health_check=None, idle_timeout=None, instances=None, internal=None, listeners=None, name=None, name_prefix=None, security_groups=None, source_security_group=None, source_security_group_id=None, subnets=None, tags=None, zone_id=None):
"""
Get an existing LoadBalancer resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] access_logs: An Access Logs block. Access Logs documented below.
:param pulumi.Input[str] arn: The ARN of the ELB
:param pulumi.Input[list] availability_zones: The AZ's to serve traffic in.
:param pulumi.Input[bool] connection_draining: Boolean to enable connection draining. Default: `false`
:param pulumi.Input[float] connection_draining_timeout: The time in seconds to allow for connections to drain. Default: `300`
:param pulumi.Input[bool] cross_zone_load_balancing: Enable cross-zone load balancing. Default: `true`
:param pulumi.Input[str] dns_name: The DNS name of the ELB
:param pulumi.Input[dict] health_check: A health_check block. Health Check documented below.
:param pulumi.Input[float] idle_timeout: The time in seconds that the connection is allowed to be idle. Default: `60`
:param pulumi.Input[list] instances: A list of instance ids to place in the ELB pool.
:param pulumi.Input[bool] internal: If true, ELB will be an internal ELB.
:param pulumi.Input[list] listeners: A list of listener blocks. Listeners documented below.
:param pulumi.Input[str] name: The name of the ELB. By default generated by this provider.
:param pulumi.Input[str] name_prefix: Creates a unique name beginning with the specified
prefix. Conflicts with `name`.
:param pulumi.Input[list] security_groups: A list of security group IDs to assign to the ELB.
Only valid if creating an ELB within a VPC
:param pulumi.Input[str] source_security_group: The name of the security group that you can use as
part of your inbound rules for your load balancer's back-end application
instances. Use this for Classic or Default VPC only.
:param pulumi.Input[str] source_security_group_id: The ID of the security group that you can use as
part of your inbound rules for your load balancer's back-end application
instances. Only available on ELBs launched in a VPC.
:param pulumi.Input[list] subnets: A list of subnet IDs to attach to the ELB.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] zone_id: The canonical hosted zone ID of the ELB (to be used in a Route 53 Alias record)
The **access_logs** object supports the following:
* `bucket` (`pulumi.Input[str]`) - The S3 bucket name to store the logs in.
* `bucket_prefix` (`pulumi.Input[str]`) - The S3 bucket prefix. Logs are stored in the root if not configured.
* `enabled` (`pulumi.Input[bool]`) - Boolean to enable / disable `access_logs`. Default is `true`
* `interval` (`pulumi.Input[float]`) - The publishing interval in minutes. Default: 60 minutes.
The **health_check** object supports the following:
* `healthyThreshold` (`pulumi.Input[float]`) - The number of checks before the instance is declared healthy.
* `interval` (`pulumi.Input[float]`) - The interval between checks.
* `target` (`pulumi.Input[str]`) - The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL
values are:
* `HTTP`, `HTTPS` - PORT and PATH are required
* `TCP`, `SSL` - PORT is required, PATH is not supported
* `timeout` (`pulumi.Input[float]`) - The length of time before the check times out.
* `unhealthyThreshold` (`pulumi.Input[float]`) - The number of checks before the instance is declared unhealthy.
The **listeners** object supports the following:
* `instance_port` (`pulumi.Input[float]`) - The port on the instance to route to
* `instanceProtocol` (`pulumi.Input[str]`) - The protocol to use to the instance. Valid
values are `HTTP`, `HTTPS`, `TCP`, or `SSL`
* `lb_port` (`pulumi.Input[float]`) - The port to listen on for the load balancer
* `lbProtocol` (`pulumi.Input[str]`) - The protocol to listen on. Valid values are `HTTP`,
`HTTPS`, `TCP`, or `SSL`
* `sslCertificateId` (`pulumi.Input[str]`) - The ARN of an SSL certificate you have
uploaded to AWS IAM. **Note ECDSA-specific restrictions below. Only valid when `lb_protocol` is either HTTPS or SSL**
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["access_logs"] = access_logs
__props__["arn"] = arn
__props__["availability_zones"] = availability_zones
__props__["connection_draining"] = connection_draining
__props__["connection_draining_timeout"] = connection_draining_timeout
__props__["cross_zone_load_balancing"] = cross_zone_load_balancing
__props__["dns_name"] = dns_name
__props__["health_check"] = health_check
__props__["idle_timeout"] = idle_timeout
__props__["instances"] = instances
__props__["internal"] = internal
__props__["listeners"] = listeners
__props__["name"] = name
__props__["name_prefix"] = name_prefix
__props__["security_groups"] = security_groups
__props__["source_security_group"] = source_security_group
__props__["source_security_group_id"] = source_security_group_id
__props__["subnets"] = subnets
__props__["tags"] = tags
__props__["zone_id"] = zone_id
return LoadBalancer(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
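For context, a minimal usage sketch of the `LoadBalancer` class above (not part of the generated module): it assumes `pulumi_aws` is installed and an AWS provider/region is configured, and the availability zone and tags are placeholders.
```python
import pulumi_aws as aws

# Classic ELB that forwards HTTP traffic on port 80 to port 8080 on the registered instances.
web_elb = aws.elb.LoadBalancer("web-elb",
    availability_zones=["us-east-1a"],     # placeholder AZ; use subnets=[...] instead inside a VPC
    listeners=[{
        "instance_port": 8080,
        "instanceProtocol": "HTTP",
        "lb_port": 80,
        "lbProtocol": "HTTP",
    }],
    idle_timeout=60,
    tags={"Environment": "dev"})
```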
#### File: pulumi_aws/globalaccelerator/accelerator.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Accelerator(pulumi.CustomResource):
attributes: pulumi.Output[dict]
"""
The attributes of the accelerator. Fields documented below.
* `flowLogsEnabled` (`bool`) - Indicates whether flow logs are enabled.
* `flowLogsS3Bucket` (`str`) - The name of the Amazon S3 bucket for the flow logs.
* `flowLogsS3Prefix` (`str`) - The prefix for the location in the Amazon S3 bucket for the flow logs.
"""
dns_name: pulumi.Output[str]
"""
The DNS name of the accelerator. For example, `a5d53ff5ee6bca4ce.awsglobalaccelerator.com`.
* `hosted_zone_id` -- The Global Accelerator Route 53 zone ID that can be used to
route an [Alias Resource Record Set][1] to the Global Accelerator. This attribute
is simply an alias for the zone ID `Z2BJ6XQ5FK7U4H`.
"""
enabled: pulumi.Output[bool]
"""
Indicates whether the accelerator is enabled. The value is true or false. The default value is true.
"""
hosted_zone_id: pulumi.Output[str]
ip_address_type: pulumi.Output[str]
"""
The value for the address type must be `IPV4`.
"""
ip_sets: pulumi.Output[list]
"""
IP address set associated with the accelerator.
* `ip_addresses` (`list`) - A list of IP addresses in the IP address set.
* `ipFamily` (`str`) - The types of IP addresses included in this IP set.
"""
name: pulumi.Output[str]
"""
The name of the accelerator.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
def __init__(__self__, resource_name, opts=None, attributes=None, enabled=None, ip_address_type=None, name=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Creates a Global Accelerator accelerator.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] attributes: The attributes of the accelerator. Fields documented below.
:param pulumi.Input[bool] enabled: Indicates whether the accelerator is enabled. The value is true or false. The default value is true.
:param pulumi.Input[str] ip_address_type: The value for the address type must be `IPV4`.
:param pulumi.Input[str] name: The name of the accelerator.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
The **attributes** object supports the following:
* `flowLogsEnabled` (`pulumi.Input[bool]`) - Indicates whether flow logs are enabled.
* `flowLogsS3Bucket` (`pulumi.Input[str]`) - The name of the Amazon S3 bucket for the flow logs.
* `flowLogsS3Prefix` (`pulumi.Input[str]`) - The prefix for the location in the Amazon S3 bucket for the flow logs.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['attributes'] = attributes
__props__['enabled'] = enabled
__props__['ip_address_type'] = ip_address_type
__props__['name'] = name
__props__['tags'] = tags
__props__['dns_name'] = None
__props__['hosted_zone_id'] = None
__props__['ip_sets'] = None
super(Accelerator, __self__).__init__(
'aws:globalaccelerator/accelerator:Accelerator',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, attributes=None, dns_name=None, enabled=None, hosted_zone_id=None, ip_address_type=None, ip_sets=None, name=None, tags=None):
"""
Get an existing Accelerator resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] attributes: The attributes of the accelerator. Fields documented below.
:param pulumi.Input[str] dns_name: The DNS name of the accelerator. For example, `a5d53ff5ee6bca4ce.awsglobalaccelerator.com`.
* `hosted_zone_id` -- The Global Accelerator Route 53 zone ID that can be used to
route an [Alias Resource Record Set][1] to the Global Accelerator. This attribute
is simply an alias for the zone ID `Z2BJ6XQ5FK7U4H`.
:param pulumi.Input[bool] enabled: Indicates whether the accelerator is enabled. The value is true or false. The default value is true.
:param pulumi.Input[str] ip_address_type: The value for the address type must be `IPV4`.
:param pulumi.Input[list] ip_sets: IP address set associated with the accelerator.
:param pulumi.Input[str] name: The name of the accelerator.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
The **attributes** object supports the following:
* `flowLogsEnabled` (`pulumi.Input[bool]`) - Indicates whether flow logs are enabled.
* `flowLogsS3Bucket` (`pulumi.Input[str]`) - The name of the Amazon S3 bucket for the flow logs.
* `flowLogsS3Prefix` (`pulumi.Input[str]`) - The prefix for the location in the Amazon S3 bucket for the flow logs.
The **ip_sets** object supports the following:
* `ip_addresses` (`pulumi.Input[list]`) - A list of IP addresses in the IP address set.
* `ipFamily` (`pulumi.Input[str]`) - The types of IP addresses included in this IP set.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["attributes"] = attributes
__props__["dns_name"] = dns_name
__props__["enabled"] = enabled
__props__["hosted_zone_id"] = hosted_zone_id
__props__["ip_address_type"] = ip_address_type
__props__["ip_sets"] = ip_sets
__props__["name"] = name
__props__["tags"] = tags
return Accelerator(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
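A minimal usage sketch for `Accelerator` (assuming `pulumi_aws` is installed and an AWS provider is configured; the flow-log setting shown is illustrative):
```python
import pulumi_aws as aws

# Global Accelerator with flow logs disabled.
example_accelerator = aws.globalaccelerator.Accelerator("example",
    enabled=True,
    ip_address_type="IPV4",
    attributes={
        "flowLogsEnabled": False,
    })
```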
#### File: pulumi_aws/globalaccelerator/endpoint_group.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class EndpointGroup(pulumi.CustomResource):
endpoint_configurations: pulumi.Output[list]
"""
The list of endpoint objects. Fields documented below.
* `endpoint_id` (`str`) - An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID.
* `weight` (`float`) - The weight associated with the endpoint. When you add weights to endpoints, you configure AWS Global Accelerator to route traffic based on proportions that you specify.
"""
endpoint_group_region: pulumi.Output[str]
"""
The name of the AWS Region where the endpoint group is located.
"""
health_check_interval_seconds: pulumi.Output[float]
"""
The time—10 seconds or 30 seconds—between each health check for an endpoint. The default value is 30.
"""
health_check_path: pulumi.Output[str]
"""
If the protocol is HTTP/S, then this specifies the path that is the destination for health check targets. The default value is slash (/).
"""
health_check_port: pulumi.Output[float]
"""
The port that AWS Global Accelerator uses to check the health of endpoints that are part of this endpoint group. The default port is the listener port that this endpoint group is associated with. If listener port is a list of ports, Global Accelerator uses the first port in the list.
"""
health_check_protocol: pulumi.Output[str]
"""
The protocol that AWS Global Accelerator uses to check the health of endpoints that are part of this endpoint group. The default value is TCP.
"""
listener_arn: pulumi.Output[str]
"""
The Amazon Resource Name (ARN) of the listener.
"""
threshold_count: pulumi.Output[float]
"""
The number of consecutive health checks required to set the state of a healthy endpoint to unhealthy, or to set an unhealthy endpoint to healthy. The default value is 3.
"""
traffic_dial_percentage: pulumi.Output[float]
"""
The percentage of traffic to send to an AWS Region. Additional traffic is distributed to other endpoint groups for this listener. The default value is 100.
"""
def __init__(__self__, resource_name, opts=None, endpoint_configurations=None, endpoint_group_region=None, health_check_interval_seconds=None, health_check_path=None, health_check_port=None, health_check_protocol=None, listener_arn=None, threshold_count=None, traffic_dial_percentage=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Global Accelerator endpoint group.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] endpoint_configurations: The list of endpoint objects. Fields documented below.
:param pulumi.Input[str] endpoint_group_region: The name of the AWS Region where the endpoint group is located.
:param pulumi.Input[float] health_check_interval_seconds: The time—10 seconds or 30 seconds—between each health check for an endpoint. The default value is 30.
:param pulumi.Input[str] health_check_path: If the protocol is HTTP/S, then this specifies the path that is the destination for health check targets. The default value is slash (/).
:param pulumi.Input[float] health_check_port: The port that AWS Global Accelerator uses to check the health of endpoints that are part of this endpoint group. The default port is the listener port that this endpoint group is associated with. If listener port is a list of ports, Global Accelerator uses the first port in the list.
:param pulumi.Input[str] health_check_protocol: The protocol that AWS Global Accelerator uses to check the health of endpoints that are part of this endpoint group. The default value is TCP.
:param pulumi.Input[str] listener_arn: The Amazon Resource Name (ARN) of the listener.
:param pulumi.Input[float] threshold_count: The number of consecutive health checks required to set the state of a healthy endpoint to unhealthy, or to set an unhealthy endpoint to healthy. The default value is 3.
:param pulumi.Input[float] traffic_dial_percentage: The percentage of traffic to send to an AWS Region. Additional traffic is distributed to other endpoint groups for this listener. The default value is 100.
The **endpoint_configurations** object supports the following:
* `endpoint_id` (`pulumi.Input[str]`) - An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID.
* `weight` (`pulumi.Input[float]`) - The weight associated with the endpoint. When you add weights to endpoints, you configure AWS Global Accelerator to route traffic based on proportions that you specify.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['endpoint_configurations'] = endpoint_configurations
__props__['endpoint_group_region'] = endpoint_group_region
__props__['health_check_interval_seconds'] = health_check_interval_seconds
__props__['health_check_path'] = health_check_path
__props__['health_check_port'] = health_check_port
__props__['health_check_protocol'] = health_check_protocol
if listener_arn is None:
raise TypeError("Missing required property 'listener_arn'")
__props__['listener_arn'] = listener_arn
__props__['threshold_count'] = threshold_count
__props__['traffic_dial_percentage'] = traffic_dial_percentage
super(EndpointGroup, __self__).__init__(
'aws:globalaccelerator/endpointGroup:EndpointGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, endpoint_configurations=None, endpoint_group_region=None, health_check_interval_seconds=None, health_check_path=None, health_check_port=None, health_check_protocol=None, listener_arn=None, threshold_count=None, traffic_dial_percentage=None):
"""
Get an existing EndpointGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] endpoint_configurations: The list of endpoint objects. Fields documented below.
:param pulumi.Input[str] endpoint_group_region: The name of the AWS Region where the endpoint group is located.
:param pulumi.Input[float] health_check_interval_seconds: The time—10 seconds or 30 seconds—between each health check for an endpoint. The default value is 30.
:param pulumi.Input[str] health_check_path: If the protocol is HTTP/S, then this specifies the path that is the destination for health check targets. The default value is slash (/).
:param pulumi.Input[float] health_check_port: The port that AWS Global Accelerator uses to check the health of endpoints that are part of this endpoint group. The default port is the listener port that this endpoint group is associated with. If listener port is a list of ports, Global Accelerator uses the first port in the list.
:param pulumi.Input[str] health_check_protocol: The protocol that AWS Global Accelerator uses to check the health of endpoints that are part of this endpoint group. The default value is TCP.
:param pulumi.Input[str] listener_arn: The Amazon Resource Name (ARN) of the listener.
:param pulumi.Input[float] threshold_count: The number of consecutive health checks required to set the state of a healthy endpoint to unhealthy, or to set an unhealthy endpoint to healthy. The default value is 3.
:param pulumi.Input[float] traffic_dial_percentage: The percentage of traffic to send to an AWS Region. Additional traffic is distributed to other endpoint groups for this listener. The default value is 100.
The **endpoint_configurations** object supports the following:
* `endpoint_id` (`pulumi.Input[str]`) - An ID for the endpoint. If the endpoint is a Network Load Balancer or Application Load Balancer, this is the Amazon Resource Name (ARN) of the resource. If the endpoint is an Elastic IP address, this is the Elastic IP address allocation ID.
* `weight` (`pulumi.Input[float]`) - The weight associated with the endpoint. When you add weights to endpoints, you configure AWS Global Accelerator to route traffic based on proportions that you specify.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["endpoint_configurations"] = endpoint_configurations
__props__["endpoint_group_region"] = endpoint_group_region
__props__["health_check_interval_seconds"] = health_check_interval_seconds
__props__["health_check_path"] = health_check_path
__props__["health_check_port"] = health_check_port
__props__["health_check_protocol"] = health_check_protocol
__props__["listener_arn"] = listener_arn
__props__["threshold_count"] = threshold_count
__props__["traffic_dial_percentage"] = traffic_dial_percentage
return EndpointGroup(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
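A minimal usage sketch for `EndpointGroup`; the listener ARN and endpoint ID below are placeholders for existing resources:
```python
import pulumi_aws as aws

example_group = aws.globalaccelerator.EndpointGroup("example",
    # ARN of an existing Global Accelerator listener (placeholder value).
    listener_arn="arn:aws:globalaccelerator::111111111111:accelerator/abcd1234/listener/6789",
    endpoint_configurations=[{
        "endpoint_id": "eipalloc-0123456789abcdef0",  # e.g. an Elastic IP allocation ID
        "weight": 100,
    }],
    traffic_dial_percentage=100)
```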
#### File: pulumi_aws/glue/connection.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Connection(pulumi.CustomResource):
catalog_id: pulumi.Output[str]
"""
The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.
"""
connection_properties: pulumi.Output[dict]
"""
A map of key-value pairs used as parameters for this connection.
"""
connection_type: pulumi.Output[str]
"""
The type of the connection. Defaults to `JDBC`.
"""
description: pulumi.Output[str]
"""
Description of the connection.
"""
match_criterias: pulumi.Output[list]
"""
A list of criteria that can be used in selecting this connection.
"""
name: pulumi.Output[str]
"""
The name of the connection.
"""
physical_connection_requirements: pulumi.Output[dict]
"""
A map of physical connection requirements, such as VPC and SecurityGroup. Defined below.
* `availability_zone` (`str`) - The availability zone of the connection. This field is redundant and implied by `subnet_id`, but is currently an API requirement.
* `securityGroupIdLists` (`list`) - The security group ID list used by the connection.
* `subnet_id` (`str`) - The subnet ID used by the connection.
"""
def __init__(__self__, resource_name, opts=None, catalog_id=None, connection_properties=None, connection_type=None, description=None, match_criterias=None, name=None, physical_connection_requirements=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Glue Connection resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] catalog_id: The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.
:param pulumi.Input[dict] connection_properties: A map of key-value pairs used as parameters for this connection.
:param pulumi.Input[str] connection_type: The type of the connection. Defaults to `JDBC`.
:param pulumi.Input[str] description: Description of the connection.
:param pulumi.Input[list] match_criterias: A list of criteria that can be used in selecting this connection.
:param pulumi.Input[str] name: The name of the connection.
:param pulumi.Input[dict] physical_connection_requirements: A map of physical connection requirements, such as VPC and SecurityGroup. Defined below.
The **physical_connection_requirements** object supports the following:
* `availability_zone` (`pulumi.Input[str]`) - The availability zone of the connection. This field is redundant and implied by `subnet_id`, but is currently an API requirement.
* `securityGroupIdLists` (`pulumi.Input[list]`) - The security group ID list used by the connection.
* `subnet_id` (`pulumi.Input[str]`) - The subnet ID used by the connection.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['catalog_id'] = catalog_id
if connection_properties is None:
raise TypeError("Missing required property 'connection_properties'")
__props__['connection_properties'] = connection_properties
__props__['connection_type'] = connection_type
__props__['description'] = description
__props__['match_criterias'] = match_criterias
__props__['name'] = name
__props__['physical_connection_requirements'] = physical_connection_requirements
super(Connection, __self__).__init__(
'aws:glue/connection:Connection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, catalog_id=None, connection_properties=None, connection_type=None, description=None, match_criterias=None, name=None, physical_connection_requirements=None):
"""
Get an existing Connection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] catalog_id: The ID of the Data Catalog in which to create the connection. If none is supplied, the AWS account ID is used by default.
:param pulumi.Input[dict] connection_properties: A map of key-value pairs used as parameters for this connection.
:param pulumi.Input[str] connection_type: The type of the connection. Defaults to `JDBC`.
:param pulumi.Input[str] description: Description of the connection.
:param pulumi.Input[list] match_criterias: A list of criteria that can be used in selecting this connection.
:param pulumi.Input[str] name: The name of the connection.
:param pulumi.Input[dict] physical_connection_requirements: A map of physical connection requirements, such as VPC and SecurityGroup. Defined below.
The **physical_connection_requirements** object supports the following:
* `availability_zone` (`pulumi.Input[str]`) - The availability zone of the connection. This field is redundant and implied by `subnet_id`, but is currently an api requirement.
* `securityGroupIdLists` (`pulumi.Input[list]`) - The security group ID list used by the connection.
* `subnet_id` (`pulumi.Input[str]`) - The subnet ID used by the connection.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["catalog_id"] = catalog_id
__props__["connection_properties"] = connection_properties
__props__["connection_type"] = connection_type
__props__["description"] = description
__props__["match_criterias"] = match_criterias
__props__["name"] = name
__props__["physical_connection_requirements"] = physical_connection_requirements
return Connection(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
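A minimal usage sketch for `Connection`, assuming a JDBC target; the connection URL and credentials are placeholders:
```python
import pulumi_aws as aws

example_connection = aws.glue.Connection("example",
    connection_properties={
        "JDBC_CONNECTION_URL": "jdbc:mysql://db.example.internal:3306/exampledb",
        "USERNAME": "example_user",
        "PASSWORD": "example_password",   # placeholder; prefer a secret in real configurations
    })
```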
#### File: pulumi_aws/iam/get_server_certificate.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetServerCertificateResult:
"""
A collection of values returned by getServerCertificate.
"""
def __init__(__self__, arn=None, certificate_body=None, certificate_chain=None, expiration_date=None, id=None, latest=None, name=None, name_prefix=None, path=None, path_prefix=None, upload_date=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
__self__.arn = arn
if certificate_body and not isinstance(certificate_body, str):
raise TypeError("Expected argument 'certificate_body' to be a str")
__self__.certificate_body = certificate_body
if certificate_chain and not isinstance(certificate_chain, str):
raise TypeError("Expected argument 'certificate_chain' to be a str")
__self__.certificate_chain = certificate_chain
if expiration_date and not isinstance(expiration_date, str):
raise TypeError("Expected argument 'expiration_date' to be a str")
__self__.expiration_date = expiration_date
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
if latest and not isinstance(latest, bool):
raise TypeError("Expected argument 'latest' to be a bool")
__self__.latest = latest
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if name_prefix and not isinstance(name_prefix, str):
raise TypeError("Expected argument 'name_prefix' to be a str")
__self__.name_prefix = name_prefix
if path and not isinstance(path, str):
raise TypeError("Expected argument 'path' to be a str")
__self__.path = path
if path_prefix and not isinstance(path_prefix, str):
raise TypeError("Expected argument 'path_prefix' to be a str")
__self__.path_prefix = path_prefix
if upload_date and not isinstance(upload_date, str):
raise TypeError("Expected argument 'upload_date' to be a str")
__self__.upload_date = upload_date
class AwaitableGetServerCertificateResult(GetServerCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServerCertificateResult(
arn=self.arn,
certificate_body=self.certificate_body,
certificate_chain=self.certificate_chain,
expiration_date=self.expiration_date,
id=self.id,
latest=self.latest,
name=self.name,
name_prefix=self.name_prefix,
path=self.path,
path_prefix=self.path_prefix,
upload_date=self.upload_date)
def get_server_certificate(latest=None, name=None, name_prefix=None, path_prefix=None, opts=None):
"""
Use this data source to lookup information about IAM Server Certificates.
## Import
The import function will read in certificate body, certificate chain (if it exists), id, name, path, and arn.
It will not retrieve the private key, which is not available through the AWS API.
:param bool latest: Sort results by expiration date and return the certificate whose expiration date is furthest in the future.
:param str name: The exact name of the certificate to look up.
:param str name_prefix: A name prefix to filter certificates by.
:param str path_prefix: A path prefix to filter certificates by.
"""
__args__ = dict()
__args__['latest'] = latest
__args__['name'] = name
__args__['namePrefix'] = name_prefix
__args__['pathPrefix'] = path_prefix
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:iam/getServerCertificate:getServerCertificate', __args__, opts=opts).value
return AwaitableGetServerCertificateResult(
arn=__ret__.get('arn'),
certificate_body=__ret__.get('certificateBody'),
certificate_chain=__ret__.get('certificateChain'),
expiration_date=__ret__.get('expirationDate'),
id=__ret__.get('id'),
latest=__ret__.get('latest'),
name=__ret__.get('name'),
name_prefix=__ret__.get('namePrefix'),
path=__ret__.get('path'),
path_prefix=__ret__.get('pathPrefix'),
upload_date=__ret__.get('uploadDate'))
```
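A minimal usage sketch for the `get_server_certificate` data source; the name prefix is a placeholder:
```python
import pulumi
import pulumi_aws as aws

# Look up the certificate whose name starts with the given prefix and whose
# expiration date is furthest in the future, then export its ARN.
cert = aws.iam.get_server_certificate(name_prefix="example.org", latest=True)
pulumi.export("certificate_arn", cert.arn)
```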
#### File: pulumi_aws/lambda_/function.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Function(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The Amazon Resource Name (ARN) identifying your Lambda Function.
"""
dead_letter_config: pulumi.Output[dict]
"""
Nested block to configure the function's *dead letter queue*. See details below.
* `target_arn` (`str`) - The ARN of an SNS topic or SQS queue to notify when an invocation fails. If this
option is used, the function's IAM role must be granted suitable access to write to the target object,
which means allowing either the `sns:Publish` or `sqs:SendMessage` action on this ARN, depending on
which service is targeted.
"""
description: pulumi.Output[str]
"""
Description of what your Lambda Function does.
"""
environment: pulumi.Output[dict]
"""
The Lambda environment's configuration settings. Fields documented below.
* `variables` (`dict`) - A map that defines environment variables for the Lambda function.
"""
code: pulumi.Output[pulumi.Archive]
"""
The path to the function's deployment package within the local filesystem. If defined, the `s3_`-prefixed options cannot be used.
"""
name: pulumi.Output[str]
"""
A unique name for your Lambda Function.
"""
handler: pulumi.Output[str]
"""
The function [entrypoint][3] in your code.
"""
invoke_arn: pulumi.Output[str]
"""
The ARN to be used for invoking Lambda Function from API Gateway - to be used in [`apigateway.Integration`](https://www.terraform.io/docs/providers/aws/r/api_gateway_integration.html)'s `uri`
"""
kms_key_arn: pulumi.Output[str]
"""
The ARN for the KMS encryption key.
"""
last_modified: pulumi.Output[str]
"""
The date this resource was last modified.
"""
layers: pulumi.Output[list]
"""
List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10]
"""
memory_size: pulumi.Output[float]
"""
Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5]
"""
publish: pulumi.Output[bool]
"""
Whether to publish creation/change as new Lambda Function Version. Defaults to `false`.
"""
qualified_arn: pulumi.Output[str]
"""
The Amazon Resource Name (ARN) identifying your Lambda Function Version
(if versioning is enabled via `publish = true`).
"""
reserved_concurrent_executions: pulumi.Output[float]
"""
The amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9]
"""
role: pulumi.Output[str]
"""
IAM role attached to the Lambda Function. This governs both who / what can invoke your Lambda Function, as well as what resources our Lambda Function has access to. See [Lambda Permission Model][4] for more details.
"""
runtime: pulumi.Output[str]
"""
See [Runtimes][6] for valid values.
"""
s3_bucket: pulumi.Output[str]
"""
The S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function.
"""
s3_key: pulumi.Output[str]
"""
The S3 key of an object containing the function's deployment package. Conflicts with `filename`.
"""
s3_object_version: pulumi.Output[str]
"""
The object version containing the function's deployment package. Conflicts with `filename`.
"""
source_code_hash: pulumi.Output[str]
"""
Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `filebase64sha256("file.zip")` (this provider 0.11.12 and later) or `base64sha256(file("file.zip"))` (this provider 0.11.11 and earlier), where "file.zip" is the local filename of the lambda function source archive.
"""
source_code_size: pulumi.Output[float]
"""
The size in bytes of the function .zip file.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the object.
"""
timeout: pulumi.Output[float]
"""
The amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5]
"""
tracing_config: pulumi.Output[dict]
version: pulumi.Output[str]
"""
Latest published version of your Lambda Function.
"""
vpc_config: pulumi.Output[dict]
"""
Provide this to allow your function to access your VPC. Fields documented below. See [Lambda in VPC][7]
* `security_group_ids` (`list`) - A list of security group IDs associated with the Lambda function.
* `subnet_ids` (`list`) - A list of subnet IDs associated with the Lambda function.
* `vpc_id` (`str`)
"""
def __init__(__self__, resource_name, opts=None, dead_letter_config=None, description=None, environment=None, code=None, name=None, handler=None, kms_key_arn=None, layers=None, memory_size=None, publish=None, reserved_concurrent_executions=None, role=None, runtime=None, s3_bucket=None, s3_key=None, s3_object_version=None, source_code_hash=None, tags=None, timeout=None, tracing_config=None, vpc_config=None, __props__=None, __name__=None, __opts__=None):
"""
Create a Function resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] dead_letter_config: Nested block to configure the function's *dead letter queue*. See details below.
:param pulumi.Input[str] description: Description of what your Lambda Function does.
:param pulumi.Input[dict] environment: The Lambda environment's configuration settings. Fields documented below.
:param pulumi.Input[pulumi.Archive] code: The path to the function's deployment package within the local filesystem. If defined, the `s3_`-prefixed options cannot be used.
:param pulumi.Input[str] name: A unique name for your Lambda Function.
:param pulumi.Input[str] handler: The function [entrypoint][3] in your code.
:param pulumi.Input[str] kms_key_arn: The ARN for the KMS encryption key.
:param pulumi.Input[list] layers: List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10]
:param pulumi.Input[float] memory_size: Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5]
:param pulumi.Input[bool] publish: Whether to publish creation/change as new Lambda Function Version. Defaults to `false`.
:param pulumi.Input[float] reserved_concurrent_executions: The amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9]
:param pulumi.Input[str] role: IAM role attached to the Lambda Function. This governs both who / what can invoke your Lambda Function, as well as what resources our Lambda Function has access to. See [Lambda Permission Model][4] for more details.
:param pulumi.Input[str] runtime: See [Runtimes][6] for valid values.
:param pulumi.Input[str] s3_bucket: The S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function.
:param pulumi.Input[str] s3_key: The S3 key of an object containing the function's deployment package. Conflicts with `filename`.
:param pulumi.Input[str] s3_object_version: The object version containing the function's deployment package. Conflicts with `filename`.
:param pulumi.Input[str] source_code_hash: Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `filebase64sha256("file.zip")` (this provider 0.11.12 and later) or `base64sha256(file("file.zip"))` (this provider 0.11.11 and earlier), where "file.zip" is the local filename of the lambda function source archive.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the object.
:param pulumi.Input[float] timeout: The amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5]
:param pulumi.Input[dict] vpc_config: Provide this to allow your function to access your VPC. Fields documented below. See [Lambda in VPC][7]
The **dead_letter_config** object supports the following:
* `target_arn` (`pulumi.Input[str]`) - The ARN of an SNS topic or SQS queue to notify when an invocation fails. If this
option is used, the function's IAM role must be granted suitable access to write to the target object,
which means allowing either the `sns:Publish` or `sqs:SendMessage` action on this ARN, depending on
which service is targeted.
The **environment** object supports the following:
* `variables` (`pulumi.Input[dict]`) - A map that defines environment variables for the Lambda function.
The **tracing_config** object supports the following:
* `mode` (`pulumi.Input[str]`) - Can be either `PassThrough` or `Active`. If PassThrough, Lambda will only trace
the request from an upstream service if it contains a tracing header with
"sampled=1". If Active, Lambda will respect any tracing header it receives
from an upstream service. If no tracing header is received, Lambda will call
X-Ray for a tracing decision.
The **vpc_config** object supports the following:
* `security_group_ids` (`pulumi.Input[list]`) - A list of security group IDs associated with the Lambda function.
* `subnet_ids` (`pulumi.Input[list]`) - A list of subnet IDs associated with the Lambda function.
* `vpc_id` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/lambda_function.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['dead_letter_config'] = dead_letter_config
__props__['description'] = description
__props__['environment'] = environment
__props__['code'] = code
__props__['name'] = name
if handler is None:
raise TypeError("Missing required property 'handler'")
__props__['handler'] = handler
__props__['kms_key_arn'] = kms_key_arn
__props__['layers'] = layers
__props__['memory_size'] = memory_size
__props__['publish'] = publish
__props__['reserved_concurrent_executions'] = reserved_concurrent_executions
if role is None:
raise TypeError("Missing required property 'role'")
__props__['role'] = role
if runtime is None:
raise TypeError("Missing required property 'runtime'")
__props__['runtime'] = runtime
__props__['s3_bucket'] = s3_bucket
__props__['s3_key'] = s3_key
__props__['s3_object_version'] = s3_object_version
__props__['source_code_hash'] = source_code_hash
__props__['tags'] = tags
__props__['timeout'] = timeout
__props__['tracing_config'] = tracing_config
__props__['vpc_config'] = vpc_config
__props__['arn'] = None
__props__['invoke_arn'] = None
__props__['last_modified'] = None
__props__['qualified_arn'] = None
__props__['source_code_size'] = None
__props__['version'] = None
super(Function, __self__).__init__(
'aws:lambda/function:Function',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, dead_letter_config=None, description=None, environment=None, code=None, name=None, handler=None, invoke_arn=None, kms_key_arn=None, last_modified=None, layers=None, memory_size=None, publish=None, qualified_arn=None, reserved_concurrent_executions=None, role=None, runtime=None, s3_bucket=None, s3_key=None, s3_object_version=None, source_code_hash=None, source_code_size=None, tags=None, timeout=None, tracing_config=None, version=None, vpc_config=None):
"""
Get an existing Function resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The Amazon Resource Name (ARN) identifying your Lambda Function.
:param pulumi.Input[dict] dead_letter_config: Nested block to configure the function's *dead letter queue*. See details below.
:param pulumi.Input[str] description: Description of what your Lambda Function does.
:param pulumi.Input[dict] environment: The Lambda environment's configuration settings. Fields documented below.
:param pulumi.Input[pulumi.Archive] code: The path to the function's deployment package within the local filesystem. If defined, the `s3_`-prefixed options cannot be used.
:param pulumi.Input[str] name: A unique name for your Lambda Function.
:param pulumi.Input[str] handler: The function [entrypoint][3] in your code.
:param pulumi.Input[str] invoke_arn: The ARN to be used for invoking Lambda Function from API Gateway - to be used in [`apigateway.Integration`](https://www.terraform.io/docs/providers/aws/r/api_gateway_integration.html)'s `uri`
:param pulumi.Input[str] kms_key_arn: The ARN for the KMS encryption key.
:param pulumi.Input[str] last_modified: The date this resource was last modified.
:param pulumi.Input[list] layers: List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. See [Lambda Layers][10]
:param pulumi.Input[float] memory_size: Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5]
:param pulumi.Input[bool] publish: Whether to publish creation/change as new Lambda Function Version. Defaults to `false`.
:param pulumi.Input[str] qualified_arn: The Amazon Resource Name (ARN) identifying your Lambda Function Version
(if versioning is enabled via `publish = true`).
:param pulumi.Input[float] reserved_concurrent_executions: The amount of reserved concurrent executions for this lambda function. A value of `0` disables lambda from being triggered and `-1` removes any concurrency limitations. Defaults to Unreserved Concurrency Limits `-1`. See [Managing Concurrency][9]
:param pulumi.Input[str] role: IAM role attached to the Lambda Function. This governs both who / what can invoke your Lambda Function, as well as what resources our Lambda Function has access to. See [Lambda Permission Model][4] for more details.
:param pulumi.Input[str] runtime: See [Runtimes][6] for valid values.
:param pulumi.Input[str] s3_bucket: The S3 bucket location containing the function's deployment package. Conflicts with `filename`. This bucket must reside in the same AWS region where you are creating the Lambda function.
:param pulumi.Input[str] s3_key: The S3 key of an object containing the function's deployment package. Conflicts with `filename`.
:param pulumi.Input[str] s3_object_version: The object version containing the function's deployment package. Conflicts with `filename`.
:param pulumi.Input[str] source_code_hash: Used to trigger updates. Must be set to a base64-encoded SHA256 hash of the package file specified with either `filename` or `s3_key`. The usual way to set this is `filebase64sha256("file.zip")` (this provider 0.11.12 and later) or `base64sha256(file("file.zip"))` (this provider 0.11.11 and earlier), where "file.zip" is the local filename of the lambda function source archive.
:param pulumi.Input[float] source_code_size: The size in bytes of the function .zip file.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the object.
:param pulumi.Input[float] timeout: The amount of time your Lambda Function has to run in seconds. Defaults to `3`. See [Limits][5]
:param pulumi.Input[str] version: Latest published version of your Lambda Function.
:param pulumi.Input[dict] vpc_config: Provide this to allow your function to access your VPC. Fields documented below. See [Lambda in VPC][7]
The **dead_letter_config** object supports the following:
* `target_arn` (`pulumi.Input[str]`) - The ARN of an SNS topic or SQS queue to notify when an invocation fails. If this
option is used, the function's IAM role must be granted suitable access to write to the target object,
which means allowing either the `sns:Publish` or `sqs:SendMessage` action on this ARN, depending on
which service is targeted.
The **environment** object supports the following:
* `variables` (`pulumi.Input[dict]`) - A map that defines environment variables for the Lambda function.
The **tracing_config** object supports the following:
* `mode` (`pulumi.Input[str]`) - Can be either `PassThrough` or `Active`. If PassThrough, Lambda will only trace
the request from an upstream service if it contains a tracing header with
"sampled=1". If Active, Lambda will respect any tracing header it receives
from an upstream service. If no tracing header is received, Lambda will call
X-Ray for a tracing decision.
The **vpc_config** object supports the following:
* `security_group_ids` (`pulumi.Input[list]`) - A list of security group IDs associated with the Lambda function.
* `subnet_ids` (`pulumi.Input[list]`) - A list of subnet IDs associated with the Lambda function.
* `vpc_id` (`pulumi.Input[str]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/lambda_function.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["dead_letter_config"] = dead_letter_config
__props__["description"] = description
__props__["environment"] = environment
__props__["code"] = code
__props__["name"] = name
__props__["handler"] = handler
__props__["invoke_arn"] = invoke_arn
__props__["kms_key_arn"] = kms_key_arn
__props__["last_modified"] = last_modified
__props__["layers"] = layers
__props__["memory_size"] = memory_size
__props__["publish"] = publish
__props__["qualified_arn"] = qualified_arn
__props__["reserved_concurrent_executions"] = reserved_concurrent_executions
__props__["role"] = role
__props__["runtime"] = runtime
__props__["s3_bucket"] = s3_bucket
__props__["s3_key"] = s3_key
__props__["s3_object_version"] = s3_object_version
__props__["source_code_hash"] = source_code_hash
__props__["source_code_size"] = source_code_size
__props__["tags"] = tags
__props__["timeout"] = timeout
__props__["tracing_config"] = tracing_config
__props__["version"] = version
__props__["vpc_config"] = vpc_config
return Function(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
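A minimal usage sketch for `Function`; the source directory, handler, runtime, and IAM role ARN are placeholders:
```python
import pulumi
import pulumi_aws as aws

# Package the local ./app directory and deploy it as a Python Lambda function.
example_fn = aws.lambda_.Function("example",
    code=pulumi.FileArchive("./app"),
    handler="index.handler",            # module.function inside the archive
    runtime="python3.7",
    role="arn:aws:iam::111111111111:role/lambda-exec-role",  # placeholder execution role ARN
    timeout=10,
    memory_size=128)
```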
#### File: pulumi_aws/lambda/permission.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Permission(pulumi.CustomResource):
action: pulumi.Output[str]
"""
The AWS Lambda action you want to allow in this statement. (e.g. `lambda:InvokeFunction`)
"""
event_source_token: pulumi.Output[str]
"""
The Event Source Token to validate. Used with [Alexa Skills][1].
"""
function: pulumi.Output[str]
"""
Name of the Lambda function whose resource policy you are updating
"""
principal: pulumi.Output[str]
"""
The principal who is getting this permission.
e.g. `s3.amazonaws.com`, an AWS account ID, or any valid AWS service principal
such as `events.amazonaws.com` or `sns.amazonaws.com`.
"""
qualifier: pulumi.Output[str]
"""
Query parameter to specify function version or alias name.
The permission will then apply to the specific qualified ARN.
e.g. `arn:aws:lambda:aws-region:acct-id:function:function-name:2`
"""
source_account: pulumi.Output[str]
"""
This parameter is used for S3 and SES. The AWS account ID (without a hyphen) of the source owner.
"""
source_arn: pulumi.Output[str]
"""
When granting Amazon S3 or CloudWatch Events permission to
invoke your function, you should specify this field with the Amazon Resource Name (ARN)
for the S3 Bucket or CloudWatch Events Rule as its value. This ensures that only events
generated from the specified bucket or rule can invoke the function.
API Gateway ARNs have a unique structure described
[here](http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-control-access-using-iam-policies-to-invoke-api.html).
"""
statement_id: pulumi.Output[str]
"""
A unique statement identifier. By default generated by this provider.
"""
statement_id_prefix: pulumi.Output[str]
"""
A statement identifier prefix. This provider will generate a unique suffix. Conflicts with `statement_id`.
"""
def __init__(__self__, resource_name, opts=None, action=None, event_source_token=None, function=None, principal=None, qualifier=None, source_account=None, source_arn=None, statement_id=None, statement_id_prefix=None, __props__=None, __name__=None, __opts__=None):
"""
Creates a Lambda permission to allow external sources invoking the Lambda function
(e.g. CloudWatch Event Rule, SNS or S3).
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action: The AWS Lambda action you want to allow in this statement. (e.g. `lambda:InvokeFunction`)
:param pulumi.Input[str] event_source_token: The Event Source Token to validate. Used with [Alexa Skills][1].
:param pulumi.Input[dict] function: Name of the Lambda function whose resource policy you are updating
:param pulumi.Input[str] principal: The principal who is getting this permission.
e.g. `s3.amazonaws.com`, an AWS account ID, or any valid AWS service principal
such as `events.amazonaws.com` or `sns.amazonaws.com`.
:param pulumi.Input[str] qualifier: Query parameter to specify function version or alias name.
The permission will then apply to the specific qualified ARN.
e.g. `arn:aws:lambda:aws-region:acct-id:function:function-name:2`
:param pulumi.Input[str] source_account: This parameter is used for S3 and SES. The AWS account ID (without a hyphen) of the source owner.
:param pulumi.Input[str] source_arn: When granting Amazon S3 or CloudWatch Events permission to
invoke your function, you should specify this field with the Amazon Resource Name (ARN)
for the S3 Bucket or CloudWatch Events Rule as its value. This ensures that only events
generated from the specified bucket or rule can invoke the function.
API Gateway ARNs have a unique structure described
[here](http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-control-access-using-iam-policies-to-invoke-api.html).
:param pulumi.Input[str] statement_id: A unique statement identifier. By default generated by this provider.
:param pulumi.Input[str] statement_id_prefix: A statement identifier prefix. This provider will generate a unique suffix. Conflicts with `statement_id`.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if action is None:
raise TypeError("Missing required property 'action'")
__props__['action'] = action
__props__['event_source_token'] = event_source_token
if function is None:
raise TypeError("Missing required property 'function'")
__props__['function'] = function
if principal is None:
raise TypeError("Missing required property 'principal'")
__props__['principal'] = principal
__props__['qualifier'] = qualifier
__props__['source_account'] = source_account
__props__['source_arn'] = source_arn
__props__['statement_id'] = statement_id
__props__['statement_id_prefix'] = statement_id_prefix
super(Permission, __self__).__init__(
'aws:lambda/permission:Permission',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, action=None, event_source_token=None, function=None, principal=None, qualifier=None, source_account=None, source_arn=None, statement_id=None, statement_id_prefix=None):
"""
Get an existing Permission resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action: The AWS Lambda action you want to allow in this statement. (e.g. `lambda:InvokeFunction`)
:param pulumi.Input[str] event_source_token: The Event Source Token to validate. Used with [Alexa Skills][1].
:param pulumi.Input[dict] function: Name of the Lambda function whose resource policy you are updating
:param pulumi.Input[str] principal: The principal who is getting this permission.
e.g. `s3.amazonaws.com`, an AWS account ID, or any valid AWS service principal
such as `events.amazonaws.com` or `sns.amazonaws.com`.
:param pulumi.Input[str] qualifier: Query parameter to specify function version or alias name.
The permission will then apply to the specific qualified ARN.
e.g. `arn:aws:lambda:aws-region:acct-id:function:function-name:2`
:param pulumi.Input[str] source_account: This parameter is used for S3 and SES. The AWS account ID (without a hyphen) of the source owner.
:param pulumi.Input[str] source_arn: When granting Amazon S3 or CloudWatch Events permission to
invoke your function, you should specify this field with the Amazon Resource Name (ARN)
for the S3 Bucket or CloudWatch Events Rule as its value. This ensures that only events
generated from the specified bucket or rule can invoke the function.
API Gateway ARNs have a unique structure described
[here](http://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-control-access-using-iam-policies-to-invoke-api.html).
:param pulumi.Input[str] statement_id: A unique statement identifier. By default generated by this provider.
:param pulumi.Input[str] statement_id_prefix: A statement identifier prefix. This provider will generate a unique suffix. Conflicts with `statement_id`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["action"] = action
__props__["event_source_token"] = event_source_token
__props__["function"] = function
__props__["principal"] = principal
__props__["qualifier"] = qualifier
__props__["source_account"] = source_account
__props__["source_arn"] = source_arn
__props__["statement_id"] = statement_id
__props__["statement_id_prefix"] = statement_id_prefix
return Permission(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
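A minimal usage sketch for the `Permission` resource above (not part of the original file): it grants an S3 bucket permission to invoke a Lambda function. It assumes `pulumi_aws` is installed and the code runs inside a Pulumi program; the resource name, function name, and bucket ARN are illustrative placeholders.
```python
import pulumi_aws as aws

# Illustrative only: allow events from a specific bucket to invoke a Lambda function.
allow_bucket = aws.lambda_.Permission(
    "allow-bucket",
    action="lambda:InvokeFunction",
    function="my-function",               # name (or ARN) of an existing Lambda function
    principal="s3.amazonaws.com",
    source_arn="arn:aws:s3:::my-bucket",  # restricts which bucket may invoke it
)
```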
#### File: pulumi_aws/neptune/cluster_instance.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class ClusterInstance(pulumi.CustomResource):
address: pulumi.Output[str]
"""
The hostname of the instance. See also `endpoint` and `port`.
"""
apply_immediately: pulumi.Output[bool]
"""
Specifies whether any instance modifications
are applied immediately or during the next maintenance window. Default is `false`.
"""
arn: pulumi.Output[str]
"""
Amazon Resource Name (ARN) of neptune instance
"""
auto_minor_version_upgrade: pulumi.Output[bool]
"""
Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
"""
availability_zone: pulumi.Output[str]
"""
The EC2 Availability Zone that the neptune instance is created in.
"""
cluster_identifier: pulumi.Output[str]
"""
The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance.
"""
dbi_resource_id: pulumi.Output[str]
"""
The region-unique, immutable identifier for the neptune instance.
"""
endpoint: pulumi.Output[str]
"""
The connection endpoint in `address:port` format.
"""
engine: pulumi.Output[str]
"""
The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
"""
engine_version: pulumi.Output[str]
"""
The neptune engine version.
"""
identifier: pulumi.Output[str]
"""
The identifier for the neptune instance. If omitted, this provider will assign a random, unique identifier.
"""
identifier_prefix: pulumi.Output[str]
"""
Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
"""
instance_class: pulumi.Output[str]
"""
The instance class to use.
"""
kms_key_arn: pulumi.Output[str]
"""
The ARN for the KMS encryption key, if one is set for the neptune cluster.
"""
neptune_parameter_group_name: pulumi.Output[str]
"""
The name of the neptune parameter group to associate with this instance.
"""
neptune_subnet_group_name: pulumi.Output[str]
"""
A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html).
"""
port: pulumi.Output[float]
"""
The port on which the DB accepts connections. Defaults to `8182`.
"""
preferred_backup_window: pulumi.Output[str]
"""
The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
"""
preferred_maintenance_window: pulumi.Output[str]
"""
The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
"""
promotion_tier: pulumi.Output[float]
"""
Default 0. Failover priority setting at the instance level. A reader with a lower tier has higher priority to be promoted to writer.
"""
publicly_accessible: pulumi.Output[bool]
"""
Bool to control if instance is publicly accessible. Default is `false`.
"""
storage_encrypted: pulumi.Output[bool]
"""
Specifies whether the neptune cluster is encrypted.
"""
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the instance.
"""
writer: pulumi.Output[bool]
"""
Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
"""
def __init__(__self__, resource_name, opts=None, apply_immediately=None, auto_minor_version_upgrade=None, availability_zone=None, cluster_identifier=None, engine=None, engine_version=None, identifier=None, identifier_prefix=None, instance_class=None, neptune_parameter_group_name=None, neptune_subnet_group_name=None, port=None, preferred_backup_window=None, preferred_maintenance_window=None, promotion_tier=None, publicly_accessible=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
A Cluster Instance Resource defines attributes that are specific to a single instance in a Neptune Cluster.
You can add neptune instances and Neptune manages the replication. You can create multiple Cluster Instance
resources and join them all to the same Neptune Cluster, or you may specify different Cluster Instance resources with various `instance_class` sizes.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications
are applied immediately or during the next maintenance window. Default is `false`.
:param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
:param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in.
:param pulumi.Input[str] cluster_identifier: The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance.
:param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
:param pulumi.Input[str] engine_version: The neptune engine version.
:param pulumi.Input[str] identifier: The identifier for the neptune instance. If omitted, this provider will assign a random, unique identifier.
:param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
:param pulumi.Input[str] instance_class: The instance class to use.
:param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance.
:param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html).
:param pulumi.Input[float] port: The port on which the DB accepts connections. Defaults to `8182`.
:param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
:param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
:param pulumi.Input[float] promotion_tier: Default 0. Failover priority setting at the instance level. A reader with a lower tier has higher priority to be promoted to writer.
:param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the instance.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['apply_immediately'] = apply_immediately
__props__['auto_minor_version_upgrade'] = auto_minor_version_upgrade
__props__['availability_zone'] = availability_zone
if cluster_identifier is None:
raise TypeError("Missing required property 'cluster_identifier'")
__props__['cluster_identifier'] = cluster_identifier
__props__['engine'] = engine
__props__['engine_version'] = engine_version
__props__['identifier'] = identifier
__props__['identifier_prefix'] = identifier_prefix
if instance_class is None:
raise TypeError("Missing required property 'instance_class'")
__props__['instance_class'] = instance_class
__props__['neptune_parameter_group_name'] = neptune_parameter_group_name
__props__['neptune_subnet_group_name'] = neptune_subnet_group_name
__props__['port'] = port
__props__['preferred_backup_window'] = preferred_backup_window
__props__['preferred_maintenance_window'] = preferred_maintenance_window
__props__['promotion_tier'] = promotion_tier
__props__['publicly_accessible'] = publicly_accessible
__props__['tags'] = tags
__props__['address'] = None
__props__['arn'] = None
__props__['dbi_resource_id'] = None
__props__['endpoint'] = None
__props__['kms_key_arn'] = None
__props__['storage_encrypted'] = None
__props__['writer'] = None
super(ClusterInstance, __self__).__init__(
'aws:neptune/clusterInstance:ClusterInstance',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, address=None, apply_immediately=None, arn=None, auto_minor_version_upgrade=None, availability_zone=None, cluster_identifier=None, dbi_resource_id=None, endpoint=None, engine=None, engine_version=None, identifier=None, identifier_prefix=None, instance_class=None, kms_key_arn=None, neptune_parameter_group_name=None, neptune_subnet_group_name=None, port=None, preferred_backup_window=None, preferred_maintenance_window=None, promotion_tier=None, publicly_accessible=None, storage_encrypted=None, tags=None, writer=None):
"""
Get an existing ClusterInstance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address: The hostname of the instance. See also `endpoint` and `port`.
:param pulumi.Input[bool] apply_immediately: Specifies whether any instance modifications
are applied immediately or during the next maintenance window. Default is `false`.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of neptune instance
:param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the instance during the maintenance window. Default is `true`.
:param pulumi.Input[str] availability_zone: The EC2 Availability Zone that the neptune instance is created in.
:param pulumi.Input[str] cluster_identifier: The identifier of the [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html) in which to launch this instance.
:param pulumi.Input[str] dbi_resource_id: The region-unique, immutable identifier for the neptune instance.
:param pulumi.Input[str] endpoint: The connection endpoint in `address:port` format.
:param pulumi.Input[str] engine: The name of the database engine to be used for the neptune instance. Defaults to `neptune`. Valid Values: `neptune`.
:param pulumi.Input[str] engine_version: The neptune engine version.
:param pulumi.Input[str] identifier: The identifier for the neptune instance. If omitted, this provider will assign a random, unique identifier.
:param pulumi.Input[str] identifier_prefix: Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
:param pulumi.Input[str] instance_class: The instance class to use.
:param pulumi.Input[str] kms_key_arn: The ARN for the KMS encryption key, if one is set for the neptune cluster.
:param pulumi.Input[str] neptune_parameter_group_name: The name of the neptune parameter group to associate with this instance.
:param pulumi.Input[str] neptune_subnet_group_name: A subnet group to associate with this neptune instance. **NOTE:** This must match the `neptune_subnet_group_name` of the attached [`neptune.Cluster`](https://www.terraform.io/docs/providers/aws/r/neptune_cluster.html).
:param pulumi.Input[float] port: The port on which the DB accepts connections. Defaults to `8182`.
:param pulumi.Input[str] preferred_backup_window: The daily time range during which automated backups are created if automated backups are enabled. Eg: "04:00-09:00"
:param pulumi.Input[str] preferred_maintenance_window: The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
:param pulumi.Input[float] promotion_tier: Default 0. Failover priority setting at the instance level. A reader with a lower tier has higher priority to be promoted to writer.
:param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly accessible. Default is `false`.
:param pulumi.Input[bool] storage_encrypted: Specifies whether the neptune cluster is encrypted.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the instance.
:param pulumi.Input[bool] writer: Boolean indicating if this instance is writable. `False` indicates this instance is a read replica.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["address"] = address
__props__["apply_immediately"] = apply_immediately
__props__["arn"] = arn
__props__["auto_minor_version_upgrade"] = auto_minor_version_upgrade
__props__["availability_zone"] = availability_zone
__props__["cluster_identifier"] = cluster_identifier
__props__["dbi_resource_id"] = dbi_resource_id
__props__["endpoint"] = endpoint
__props__["engine"] = engine
__props__["engine_version"] = engine_version
__props__["identifier"] = identifier
__props__["identifier_prefix"] = identifier_prefix
__props__["instance_class"] = instance_class
__props__["kms_key_arn"] = kms_key_arn
__props__["neptune_parameter_group_name"] = neptune_parameter_group_name
__props__["neptune_subnet_group_name"] = neptune_subnet_group_name
__props__["port"] = port
__props__["preferred_backup_window"] = preferred_backup_window
__props__["preferred_maintenance_window"] = preferred_maintenance_window
__props__["promotion_tier"] = promotion_tier
__props__["publicly_accessible"] = publicly_accessible
__props__["storage_encrypted"] = storage_encrypted
__props__["tags"] = tags
__props__["writer"] = writer
return ClusterInstance(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
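A short usage sketch for `ClusterInstance` (not part of the original file), assuming an already-created Neptune cluster; the cluster identifier and instance class values are illustrative.
```python
import pulumi_aws as aws

# Illustrative only: attach one instance to an existing Neptune cluster.
instance = aws.neptune.ClusterInstance(
    "example",
    cluster_identifier="my-neptune-cluster",  # identifier of an existing neptune.Cluster
    instance_class="db.r4.large",
    apply_immediately=True,
)
```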
#### File: pulumi_aws/organizations/account.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Account(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The ARN for this account.
"""
email: pulumi.Output[str]
"""
The email address of the owner to assign to the new member account. This email address must not already be associated with another AWS account.
"""
iam_user_access_to_billing: pulumi.Output[str]
"""
If set to `ALLOW`, the new account enables IAM users to access account billing information if they have the required permissions. If set to `DENY`, then only the root user of the new account can access account billing information.
"""
joined_method: pulumi.Output[str]
joined_timestamp: pulumi.Output[str]
name: pulumi.Output[str]
"""
A friendly name for the member account.
"""
parent_id: pulumi.Output[str]
"""
Parent Organizational Unit ID or Root ID for the account. Defaults to the Organization default Root ID. A configuration must be present for this argument to perform drift detection.
"""
role_name: pulumi.Output[str]
"""
The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account. The Organizations API provides no method for reading this information after account creation, so this provider cannot perform drift detection on its value and will always show a difference for a configured value after import unless [`ignoreChanges`](https://www.pulumi.com/docs/intro/concepts/programming-model/#ignorechanges) is used.
"""
status: pulumi.Output[str]
tags: pulumi.Output[dict]
"""
Key-value mapping of resource tags.
"""
def __init__(__self__, resource_name, opts=None, email=None, iam_user_access_to_billing=None, name=None, parent_id=None, role_name=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a resource to create a member account in the current organization.
> **Note:** Account management must be done from the organization's master account.
!> **WARNING:** Deleting this resource will only remove an AWS account from an organization. This provider will not close the account. The member account must be prepared to be a standalone account beforehand. See the [AWS Organizations documentation](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html) for more information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] email: The email address of the owner to assign to the new member account. This email address must not already be associated with another AWS account.
:param pulumi.Input[str] iam_user_access_to_billing: If set to `ALLOW`, the new account enables IAM users to access account billing information if they have the required permissions. If set to `DENY`, then only the root user of the new account can access account billing information.
:param pulumi.Input[str] name: A friendly name for the member account.
:param pulumi.Input[str] parent_id: Parent Organizational Unit ID or Root ID for the account. Defaults to the Organization default Root ID. A configuration must be present for this argument to perform drift detection.
:param pulumi.Input[str] role_name: The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account. The Organizations API provides no method for reading this information after account creation, so this provider cannot perform drift detection on its value and will always show a difference for a configured value after import unless [`ignoreChanges`](https://www.pulumi.com/docs/intro/concepts/programming-model/#ignorechanges) is used.
:param pulumi.Input[dict] tags: Key-value mapping of resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if email is None:
raise TypeError("Missing required property 'email'")
__props__['email'] = email
__props__['iam_user_access_to_billing'] = iam_user_access_to_billing
__props__['name'] = name
__props__['parent_id'] = parent_id
__props__['role_name'] = role_name
__props__['tags'] = tags
__props__['arn'] = None
__props__['joined_method'] = None
__props__['joined_timestamp'] = None
__props__['status'] = None
super(Account, __self__).__init__(
'aws:organizations/account:Account',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, email=None, iam_user_access_to_billing=None, joined_method=None, joined_timestamp=None, name=None, parent_id=None, role_name=None, status=None, tags=None):
"""
Get an existing Account resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN for this account.
:param pulumi.Input[str] email: The email address of the owner to assign to the new member account. This email address must not already be associated with another AWS account.
:param pulumi.Input[str] iam_user_access_to_billing: If set to `ALLOW`, the new account enables IAM users to access account billing information if they have the required permissions. If set to `DENY`, then only the root user of the new account can access account billing information.
:param pulumi.Input[str] name: A friendly name for the member account.
:param pulumi.Input[str] parent_id: Parent Organizational Unit ID or Root ID for the account. Defaults to the Organization default Root ID. A configuration must be present for this argument to perform drift detection.
:param pulumi.Input[str] role_name: The name of an IAM role that Organizations automatically preconfigures in the new member account. This role trusts the master account, allowing users in the master account to assume the role, as permitted by the master account administrator. The role has administrator permissions in the new member account. The Organizations API provides no method for reading this information after account creation, so this provider cannot perform drift detection on its value and will always show a difference for a configured value after import unless [`ignoreChanges`](https://www.pulumi.com/docs/intro/concepts/programming-model/#ignorechanges) is used.
:param pulumi.Input[dict] tags: Key-value mapping of resource tags.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["email"] = email
__props__["iam_user_access_to_billing"] = iam_user_access_to_billing
__props__["joined_method"] = joined_method
__props__["joined_timestamp"] = joined_timestamp
__props__["name"] = name
__props__["parent_id"] = parent_id
__props__["role_name"] = role_name
__props__["status"] = status
__props__["tags"] = tags
return Account(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
```
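A usage sketch for `Account` (not part of the original file); the email address, account name, and role name below are illustrative values only.
```python
import pulumi_aws as aws

# Illustrative only: create a member account in the current organization.
account = aws.organizations.Account(
    "account",
    email="[email protected]",             # must not already belong to an AWS account
    name="member-account",
    role_name="OrganizationAccountAccessRole",
    iam_user_access_to_billing="ALLOW",
)
```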
#### File: pulumi_aws/servicediscovery/service.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Service(pulumi.CustomResource):
arn: pulumi.Output[str]
"""
The ARN of the service.
"""
description: pulumi.Output[str]
"""
The description of the service.
"""
dns_config: pulumi.Output[dict]
"""
A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance.
* `dnsRecords` (`list`) - An array that contains one DnsRecord object for each resource record set.
* `ttl` (`float`) - The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set.
* `type` (`str`) - The type of the resource record set that you want Route 53 to create. Valid Values: A, AAAA, SRV, CNAME
* `namespace_id` (`str`) - The ID of the namespace to use for DNS configuration.
* `routingPolicy` (`str`) - The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED
"""
health_check_config: pulumi.Output[dict]
"""
A complex type that contains settings for an optional health check. Only for Public DNS namespaces.
* `failure_threshold` (`float`) - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
* `resource_path` (`str`) - The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /.
* `type` (`str`) - The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
"""
health_check_custom_config: pulumi.Output[dict]
"""
A complex type that contains settings for ECS managed health checks.
* `failure_threshold` (`float`) - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
"""
name: pulumi.Output[str]
"""
The name of the service.
"""
namespace_id: pulumi.Output[str]
"""
The ID of the namespace to use for DNS configuration.
"""
def __init__(__self__, resource_name, opts=None, description=None, dns_config=None, health_check_config=None, health_check_custom_config=None, name=None, namespace_id=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Service Discovery Service resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of the service.
:param pulumi.Input[dict] dns_config: A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance.
:param pulumi.Input[dict] health_check_config: A complex type that contains settings for an optional health check. Only for Public DNS namespaces.
:param pulumi.Input[dict] health_check_custom_config: A complex type that contains settings for ECS managed health checks.
:param pulumi.Input[str] name: The name of the service.
:param pulumi.Input[str] namespace_id: The ID of the namespace to use for DNS configuration.
The **dns_config** object supports the following:
* `dnsRecords` (`pulumi.Input[list]`) - An array that contains one DnsRecord object for each resource record set.
* `ttl` (`pulumi.Input[float]`) - The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set.
* `type` (`pulumi.Input[str]`) - The type of the resource record set that you want Route 53 to create. Valid Values: A, AAAA, SRV, CNAME
* `namespace_id` (`pulumi.Input[str]`) - The ID of the namespace to use for DNS configuration.
* `routingPolicy` (`pulumi.Input[str]`) - The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED
The **health_check_config** object supports the following:
* `failure_threshold` (`pulumi.Input[float]`) - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
* `resource_path` (`pulumi.Input[str]`) - The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /.
* `type` (`pulumi.Input[str]`) - The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
The **health_check_custom_config** object supports the following:
* `failure_threshold` (`pulumi.Input[float]`) - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['description'] = description
__props__['dns_config'] = dns_config
__props__['health_check_config'] = health_check_config
__props__['health_check_custom_config'] = health_check_custom_config
__props__['name'] = name
__props__['namespace_id'] = namespace_id
__props__['arn'] = None
super(Service, __self__).__init__(
'aws:servicediscovery/service:Service',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, description=None, dns_config=None, health_check_config=None, health_check_custom_config=None, name=None, namespace_id=None):
"""
Get an existing Service resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the service.
:param pulumi.Input[str] description: The description of the service.
:param pulumi.Input[dict] dns_config: A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance.
:param pulumi.Input[dict] health_check_config: A complex type that contains settings for an optional health check. Only for Public DNS namespaces.
:param pulumi.Input[dict] health_check_custom_config: A complex type that contains settings for ECS managed health checks.
:param pulumi.Input[str] name: The name of the service.
:param pulumi.Input[str] namespace_id: The ID of the namespace to use for DNS configuration.
The **dns_config** object supports the following:
* `dnsRecords` (`pulumi.Input[list]`) - An array that contains one DnsRecord object for each resource record set.
* `ttl` (`pulumi.Input[float]`) - The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set.
* `type` (`pulumi.Input[str]`) - The type of the resource record set that you want Route 53 to create. Valid Values: A, AAAA, SRV, CNAME
* `namespace_id` (`pulumi.Input[str]`) - The ID of the namespace to use for DNS configuration.
* `routingPolicy` (`pulumi.Input[str]`) - The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED
The **health_check_config** object supports the following:
* `failure_threshold` (`pulumi.Input[float]`) - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
* `resource_path` (`pulumi.Input[str]`) - The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /.
* `type` (`pulumi.Input[str]`) - The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP
The **health_check_custom_config** object supports the following:
* `failure_threshold` (`pulumi.Input[float]`) - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["description"] = description
__props__["dns_config"] = dns_config
__props__["health_check_config"] = health_check_config
__props__["health_check_custom_config"] = health_check_custom_config
__props__["name"] = name
__props__["namespace_id"] = namespace_id
return Service(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
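# Usage sketch (not part of this file; the resource name, namespace id and dict keys
# follow the docstrings above and are illustrative only):
#   import pulumi_aws as aws
#   svc = aws.servicediscovery.Service("example",
#       dns_config={
#           "namespace_id": "ns-example",
#           "dnsRecords": [{"ttl": 10, "type": "A"}],
#           "routingPolicy": "MULTIVALUE",
#       })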
``` |
{
"source": "johnkuangwork/deploy-turnover-demo-ind",
"score": 3
} |
#### File: deploy-turnover-demo-ind/.ipynb_checkpoints/Online V1-checkpoint.py
```python
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor,RandomForestClassifier
import shap
# shap.initjs()
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
import pickle
import copy
import gradio as gr
import re
# In[2]:
# Load the Model and categorical levels back from file
Filename = "Model/Model_RF.pkl"
with open(Filename, 'rb') as file:
model = pickle.load(file)
Filename = "Model/cat_data_dict.pkl"
with open(Filename, 'rb') as file:
cat_data_dict = pickle.load(file)
Filename = "Model/Explainer.pkl"
with open(Filename, 'rb') as file:
explainer = pickle.load(file)
sample_input = pd.read_excel(r'Model\sample_input.xlsx',sheet_name='Sheet1')
pred_input = copy.deepcopy(sample_input)
# In[6]:
inputs_list = [
gr.inputs.Slider(minimum=18, maximum=100, step=1, default=40, label="Age"),
gr.inputs.Slider(minimum=0, maximum=30, step=1, default=5, label="Tenure with company"),
gr.inputs.Number(default=100000, label='Base Salary'),
gr.inputs.Slider(minimum=0, maximum=350, step=10, default=150, label="PTO Taken (in Hours)"),
gr.inputs.Dropdown(choices=cat_data_dict['FUNCTION'], type="value", default=None, label="Sub Job Function")
]
# In[7]:
def predict_turnover(AGE, TENURE_COMPANY, SALARY, PTO, FUNCTION):
    # Start from a fresh copy of the sample row so one-hot FUNCTION_ columns
    # from previous calls do not leak into this prediction
    pred_input = copy.deepcopy(sample_input)
    pred_input['AGE'] = AGE
pred_input['TENURE_COMPANY'] = TENURE_COMPANY
pred_input['SALARY'] = SALARY
pred_input['PTO'] = PTO
FUNCTION_COL = "FUNCTION_"+FUNCTION
pred_input[FUNCTION_COL] = 1
# Make prediction
y_score_stay = model.predict_proba(pred_input)[:,0][0]
y_score_turnover = model.predict_proba(pred_input)[:,1][0]
pred_dict = {'Stay':y_score_stay, 'Leave': y_score_turnover}
# , pred_input
# Explain with SHAP
plt.clf()
choosen_instance = pred_input.iloc[0]
shap_values_instance = explainer.shap_values(choosen_instance)
shap.waterfall_plot(shap.Explanation(values=shap_values_instance[1],
base_values=explainer.expected_value[1],
data=pred_input.iloc[0]),max_display=5,show=False)
# plt.savefig('online_shap.jpg', bbox_inches="tight")
plt.tight_layout()
plt.spring()
return pred_dict, plt
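# Example of calling the prediction function directly, outside Gradio
# (hypothetical input values; FUNCTION must be one of cat_data_dict['FUNCTION']):
#   probs, fig = predict_turnover(AGE=40, TENURE_COMPANY=5, SALARY=100000,
#                                 PTO=150, FUNCTION=cat_data_dict['FUNCTION'][0])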
# In[8]:
output_result = gr.outputs.Label(num_top_classes=2, label = 'Probability of Leaving in the next 12 months')
# output_input = gr.outputs.Dataframe(headers=None, max_rows=3, max_cols=10, overflow_row_behaviour="paginate", type="auto", label="Inputs")
output_img = gr.outputs.Image(type="auto", labeled_segments=False, label="SHAP")
outputs_list = [output_result, output_img]
# outputs_list = [output_result]
# In[9]:
iface = gr.Interface(
fn = predict_turnover,
inputs = inputs_list,
outputs = outputs_list,
live = True,
theme = "compact",
interpretation=None,
title="Predict Company Turnover",
description="Enter employee information",
flagging_options=["Correct", "Wrong", "Not sure"]
)
# In[10]:
iface.launch(share=True)
# In[ ]:
# In[11]:
# y_score_turnover = model.predict_proba(sample_input)[:,1][0]
# y_score_stay = model.predict_proba(sample_input)[:,0][0]
# In[12]:
# male_words, female_words = ["he", "his", "him"], ["she", "her"]
# def gender_of_sentence(sentence):
# male_count = len([word for word in sentence.split() if word.lower() in male_words])
# female_count = len([word for word in sentence.split() if word.lower() in female_words])
# total = max(male_count + female_count, 1)
# return {"male": male_count / total, "female": female_count / total}
# def interpret_gender(sentence):
# result = gender_of_sentence(sentence)
# is_male = result["male"] > result["female"]
# interpretation = []
# for word in re.split('( )', sentence):
# score = 0
# token = word.lower()
# if (is_male and token in male_words) or (not is_male and token in female_words):
# score = 1
# elif (is_male and token in female_words) or (not is_male and token in male_words):
# score = -0.2
# interpretation.append((word, score))
# return interpretation
# iface = gr.Interface(
# fn=gender_of_sentence, inputs=gr.inputs.Textbox(default="She went to his house to get her keys."),
# outputs="label", interpretation=interpret_gender, enable_queue=True)
# iface.launch()
# In[ ]:
``` |
{
"source": "johnkuney/bitcoin-abc",
"score": 2
} |
#### File: test/functional/abc_p2p_avalanche_voting_proofs.py
```python
from test_framework.avatools import (
create_coinbase_stakes,
get_ava_p2p_interface,
)
from test_framework.key import ECKey, ECPubKey
from test_framework.messages import (
MSG_AVA_PROOF,
AvalancheProofVoteResponse,
AvalancheVote,
FromHex,
LegacyAvalancheProof,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.wallet_util import bytes_to_wif
class AvalancheTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [
[
'-enableavalanche=1',
'-avacooldown=0',
'-avalancheconflictingproofcooldown=0',
'[email protected]',
],
]
self.supports_cli = False
def run_test(self):
node = self.nodes[0]
ava_node = get_ava_p2p_interface(self.nodes[0])
# Generate coinbases to use for stakes
stakes_key = node.get_deterministic_priv_key()
blocks = node.generatetoaddress(4, stakes_key.address)
# Get the ava key so we can verify signatures.
ava_key = ECPubKey()
ava_key.set(bytes.fromhex(node.getavalanchekey()))
# Get stakes key so we can sign stakes
priv_key = ECKey()
priv_key.set(bytes.fromhex(
"12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f747"
), True)
master_key = bytes_to_wif(priv_key.get_bytes())
def create_proof(stakes):
proof = node.buildavalancheproof(11, 12, master_key, stakes)
proof_id = FromHex(LegacyAvalancheProof(), proof).proofid
return proof, proof_id
# proof_0 is valid right now
stakes_0 = create_coinbase_stakes(node, [blocks[0]], stakes_key.key)
proof_0, proof_0_id = create_proof(stakes_0)
# proof_1 is valid right now, and from different stakes
stakes_1 = create_coinbase_stakes(node, [blocks[1]], stakes_key.key)
proof_1, proof_1_id = create_proof(stakes_1)
# proof_2 is an orphan because the stake UTXO is unknown
stakes_2 = create_coinbase_stakes(node, [blocks[2]], stakes_key.key)
stakes_2[0]['height'] = 5
proof_2, proof_2_id = create_proof(stakes_2)
# proof_3 conflicts with proof_0 and proof_1
stakes_3 = create_coinbase_stakes(
node, [blocks[0], blocks[1]], stakes_key.key)
proof_3, proof_3_id = create_proof(stakes_3)
# proof_4 is invalid and should be rejected
stakes_4 = create_coinbase_stakes(node, [blocks[3]], stakes_key.key)
stakes_4[0]['amount'] -= 100000
proof_4, proof_4_id = create_proof(stakes_4)
# Create a helper to issue a poll and validate the responses
def poll_assert_response(expected):
# Issue a poll for each proof
self.log.info("Trigger polling from the node...")
ava_node.send_poll(
[proof_0_id, proof_1_id, proof_2_id, proof_3_id, proof_4_id],
MSG_AVA_PROOF)
response = ava_node.wait_for_avaresponse()
r = response.response
# Verify signature
assert ava_key.verify_schnorr(response.sig, r.get_hash())
# Verify votes
votes = r.votes
assert_equal(len(votes), len(expected))
for i in range(0, len(votes)):
assert_equal(repr(votes[i]), repr(expected[i]))
# Check that all proofs start unknown
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_4_id)])
# Send the first proof. Nodes should now respond that it's accepted
node.sendavalancheproof(proof_0)
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_4_id)])
# Send and check the 2nd proof. Nodes should now respond that it's
# accepted
node.sendavalancheproof(proof_1)
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_4_id)])
# The next proof should be rejected/put in the orphan pool
ava_node.send_proof(FromHex(LegacyAvalancheProof(), proof_2))
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.ORPHAN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_4_id)])
# The next proof should be rejected and marked as a conflicting proof
assert_raises_rpc_error(-8,
"The proof has conflicting utxo with an existing proof",
node.sendavalancheproof, proof_3)
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.ORPHAN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.CONFLICT, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.UNKNOWN, proof_4_id)])
# The final proof should be permanently rejected for being completely
# invalid
ava_node.send_proof(FromHex(LegacyAvalancheProof(), proof_4))
poll_assert_response([
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_0_id),
AvalancheVote(AvalancheProofVoteResponse.ACTIVE, proof_1_id),
AvalancheVote(AvalancheProofVoteResponse.ORPHAN, proof_2_id),
AvalancheVote(AvalancheProofVoteResponse.CONFLICT, proof_3_id),
AvalancheVote(AvalancheProofVoteResponse.REJECTED, proof_4_id)])
if __name__ == '__main__':
AvalancheTest().main()
``` |
{
"source": "JohnKurian/mosaic",
"score": 3
} |
#### File: mosaic/simulation/parameter.py
```python
import random
from mosaic.utils import random_uniform_on_log_space
class Parameter():
def __init__(self, name = None, value_list = [], type_sampling = None,
type = None):
self.name = name
self.value_list = value_list
self.type_sampling = type_sampling
self.type = type
if type_sampling not in ["uniform", "choice", "constant", "log_uniform"]:
            raise Exception("Can not handle {0} sampling type".format(type_sampling))
def get_info(self):
return self.value_list, self.type_sampling
def sample_new_value(self):
if self.type_sampling == "choice":
return random.choice(self.value_list)
elif self.type_sampling == "uniform":
if self.type == 'int':
return random.randint(self.value_list[0], self.value_list[1])
else:
return random.uniform(self.value_list[0], self.value_list[1])
elif self.type_sampling == "constant":
return self.value_list
elif self.type_sampling == "log_uniform":
return random_uniform_on_log_space(self.value_list[0], self.value_list[1])
```
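A small usage sketch of the `Parameter` class above (not part of the original repo files); the parameter names and value ranges are illustrative.
```python
from mosaic.simulation.parameter import Parameter

# A continuous hyperparameter sampled on a log scale, and a discrete one.
lr = Parameter(name="algo__lr", value_list=[1e-4, 1e-1],
               type_sampling="log_uniform", type="float")
max_depth = Parameter(name="algo__max_depth", value_list=[2, 4, 8, 16],
                      type_sampling="choice", type="int")

print(lr.sample_new_value())         # float between 1e-4 and 1e-1, log-uniform
print(max_depth.sample_new_value())  # one of 2, 4, 8, 16
```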
#### File: simulation/test/test_scenario.py
```python
import unittest
from mosaic.simulation import scenario
from mosaic.simulation.scenario import *
class TestScenario(unittest.TestCase):
def test_constructor(self):
importance_scenario = scenario.ImportanceScenarioStatic({}, [])
assert(isinstance(importance_scenario, scenario.AbstractImportanceScenario))
assert (isinstance(importance_scenario, scenario.BaseScenario))
for class_scenario in [scenario.WorkflowListTask, scenario.WorkflowChoiceScenario, scenario.WorkflowComplexScenario]:
workflow_scenario = class_scenario()
assert (isinstance(workflow_scenario, scenario.AbstractWorkflowScenario))
assert (isinstance(workflow_scenario, scenario.BaseScenario))
def test_importance_static(self):
graph = {
"root": ["algo"],
"algo": ["algo__param1"],
"algo__param1": ["algo__param2"],
"algo__param2": ["algo__param3"],
"algo__param3": ["algo__param4"]
}
sc = scenario.ImportanceScenarioStatic(graph, [])
assert (sc.call() == "root")
assert(sc.call() == "algo")
assert (sc.call() == "algo__param1")
assert (sc.call() == "algo__param2")
assert (sc.call() == "algo__param3")
assert (sc.call() == "algo__param4")
sc = scenario.ImportanceScenarioStatic(graph, [])
for task in ["root", "algo", "algo__param1", "algo__param2", "algo__param3", "algo__param4"]:
assert (sc.execute(task) == task)
def test_workflow_call(self):
arr1 = ["x1_p1", "x1_p2"]
arr2 = ["x2_p1", "x2_p2"]
x1 = WorkflowListTask(name ="x1", is_ordered=False, tasks = arr1.copy())
x2 = WorkflowListTask(name ="x2", is_ordered=True, tasks = arr2.copy())
start = WorkflowComplexScenario(name ="Model", scenarios=[x1, x2], is_ordered=True)
assert(start.call() == "Model")
assert(start.call() == "x1")
assert(start.call() in ["x1_p1", "x1_p2"])
assert(start.call() in ["x1_p1", "x1_p2"])
assert(start.call() == "x2")
assert(start.call() == "x2_p1")
assert(start.call() == "x2_p2")
def test_workflow_execute(self):
arr1 = ["x1_p1"]
arr2 = ["x2_p1", "x2_p2"]
x1 = WorkflowListTask(name ="x1", is_ordered=False, tasks = arr1.copy())
x2 = WorkflowListTask(name ="x2", is_ordered=True, tasks = arr2.copy())
start = WorkflowComplexScenario(name ="Model", scenarios=[x1, x2], is_ordered=True)
assert(start.execute("Model") == "Model")
assert(start.execute("x1") == "x1")
assert(start.execute("x1_p1") == "x1_p1")
assert(start.execute("x2") == "x2")
assert(start.execute("x2_p1") == "x2_p1")
assert(start.execute("x2_p2") == "x2_p2")
def test_workflow_queue_task(self):
arr1 = ["x1_p1"]
arr2 = ["x2_p1", "x2_p2"]
x1 = WorkflowListTask(name ="x1", is_ordered=False, tasks = arr1.copy())
x2 = WorkflowListTask(name ="x2", is_ordered=True, tasks = arr2.copy())
start = WorkflowComplexScenario(name ="Model", scenarios=[x1, x2], is_ordered=True)
assert(start.queue_tasks() == ["Model"])
start.call()
assert(start.queue_tasks() == ["x1"])
start.call()
assert(start.queue_tasks() == ["x1_p1"])
start.call()
assert(start.queue_tasks() == ["x2"])
start.call()
assert(start.queue_tasks() == ["x2_p1"])
def test_workflow_finished(self):
arr1 = ["x1_p1", "x1_p2"]
arr2 = ["x2_p1", "x2_p2"]
x1 = WorkflowListTask(name ="x1", is_ordered=False, tasks = arr1)
x2 = WorkflowListTask(name ="x2", is_ordered=True, tasks = arr2)
start = WorkflowComplexScenario(name ="Model", scenarios=[x1, x2], is_ordered=True)
for t in range(4):
start.call()
assert(x1.finished())
for t in range(3):
start.call()
assert(x2.finished())
assert(start.finished())
def test_workflow_choice_scenario(self):
arr1 = ["x1_p1", "x1_p2"]
arr2 = ["x2_p1", "x2_p2"]
x1 = WorkflowListTask(name ="x1", is_ordered=False, tasks = arr1)
x2 = WorkflowListTask(name ="x2", is_ordered=True, tasks = arr2)
start = WorkflowChoiceScenario(name ="Model", scenarios=[x1, x2])
assert(start.call() == "Model")
assert(start.queue_tasks() == ["x1", "x2"])
start.execute("x2")
assert(start.call() == "x2_p1")
assert(start.call() == "x2_p2")
assert(start.finished())
def test_workflow_choice_complex_scenario(self):
arr1 = ["x1_p1", "x1_p2"]
arr2 = ["x2_p1", "x2_p2"]
arr3 = ["x3_p1", "x3_p2"]
arr4 = ["x4_p1", "x4_p2"]
x1 = WorkflowListTask(name ="x1", is_ordered=True, tasks = arr1)
x2 = WorkflowListTask(name ="x2", is_ordered=True, tasks = arr2)
x3 = WorkflowListTask(name ="x3", is_ordered=True, tasks = arr3)
x4 = WorkflowListTask(name ="x4", is_ordered=True, tasks = arr4)
c1 = WorkflowChoiceScenario(name ="choix_1", scenarios=[x1, x2])
c2 = WorkflowChoiceScenario(name ="choix_2", scenarios=[x3, x4])
start = WorkflowComplexScenario(name ="Model", scenarios=[c1, c2], is_ordered = True)
assert(start.call() == "Model")
assert(start.queue_tasks() == ["choix_1"])
assert(start.call() == "choix_1")
assert(start.call() in ["x1", "x2"])
assert(start.call() in ["x1_p1", "x2_p1"])
assert(start.call() in ["x1_p2", "x2_p2"])
assert(start.call() == "choix_2")
assert(start.call() in ["x3", "x4"])
assert(start.call() in ["x3_p1", "x4_p1"])
assert(start.call() in ["x3_p2", "x4_p2"])
```
#### File: mosaic/strategy/early_stopping.py
```python
from mosaic.strategy import BaseEarlyStopping
class Hyperband(BaseEarlyStopping):
def __init__(self):
super().__init__()
def evaluate(self):
pass
```
#### File: mosaic/strategy/__init__.py
```python
import logging
class BaseStrategy():
def __init__(self):
self.logger = logging.getLogger("mcts")
def selection(self, parent, ids, vals, visits, state=None):
pass
def expansion(self, sampler, arg):
return sampler(*arg)
def backpropagate(self, id, value, visit, reward):
new_val = value + (reward - value) / (visit + 1)
return new_val, visit + 1
def playout(self):
pass
class BaseEarlyStopping():
def __init__(self):
pass
def evaluate(self, func, args):
return func(*args)
```
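A quick sketch of how `BaseStrategy.backpropagate` maintains a running mean of rewards (not part of the original files); the reward values are illustrative.
```python
from mosaic.strategy import BaseStrategy

strategy = BaseStrategy()

# Incremental mean: value_{n+1} = value_n + (reward - value_n) / (n + 1)
value, visits = 0.0, 0
for reward in [1.0, 0.0]:
    value, visits = strategy.backpropagate(None, value, visits, reward)

print(value, visits)  # 0.5 2
```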
#### File: mosaic/test/test_space.py
```python
import unittest
from mosaic.space import Space
from mosaic.simulation.parameter import Parameter
from mosaic.simulation.rules import ChildRule
from mosaic.simulation.scenario import WorkflowListTask, WorkflowChoiceScenario
class TestSpace(unittest.TestCase):
def test_init(self):
pass
def test_next_params(self):
def a_func(): return 0
def b_func(): return 0
def c_func(): return 0
x1 = WorkflowListTask(is_ordered=False, name ="x1", tasks = ["x1__p1", "x1__p2"])
x2 = WorkflowListTask(is_ordered=True, name ="x2", tasks = ["x2__p1", "x2__p2", "x2__p3"])
start = WorkflowChoiceScenario(name ="Model", scenarios=[x1, x2])
sampler = { "x1__p1": Parameter("x1__p1", [0, 1], "uniform", "float"),
"x1__p2": Parameter("x1__p2", [1, 2, 3, 4, 5, 6, 7], "choice", "int"),
"x2__p1": Parameter("x2__p1", ["a", "b", "c", "d"], "choice", "string"),
"x2__p2": Parameter("x2__p2", [a_func, b_func, c_func], "choice", "func"),
"x2__p3": Parameter("x2__p3", "lol", "constant", "string"),
}
space = Space(scenario = start, sampler = sampler)
for i in range(50):
assert(space.next_params(history=[("Model", None), ("x2", None), ("x2__p1", "c"), ("x2__p2", a_func)]) == ("x2__p3", "lol", True))
assert(space.next_params(history=[("Model", None), ("x2", None), ("x2__p1", "c")])[0] == "x2__p2")
assert(space.next_params(history=[("Model", None), ("x2", None), ("x2__p1", "a")])[0] == "x2__p2")
assert(space.next_params(history=[("Model", None), ("x2", None)])[0] == "x2__p1")
assert(space.next_params(history=[("Model", None), ("x1", None)])[0] in ["x1__p1", "x1__p2"])
assert(space.next_params(history=[("Model", None), ("x1", None), ("x1__p2", 5)])[0] in ["x1__p1", "x1__p2"])
assert(space.next_params(history=[("Model", None)])[0] in ["x1", "x2"])
def test_is_valid(self):
x1 = WorkflowListTask(is_ordered=False, name ="x1", tasks = ["x1__p1", "x1__p2"])
x2 = WorkflowListTask(is_ordered=True, name ="x2", tasks = ["x2__p1", "x2__p2", "x2__p3"])
start = WorkflowChoiceScenario(name ="Model", scenarios=[x1, x2])
sampler = { "x1__p1": Parameter("x1__p1", [0, 1], "uniform", "float"),
"x1__p2": Parameter("x1__p2", [1, 2, 3, 4, 5, 6, 7], "choice", "int"),
"x2__p1": Parameter("x2__p1", ["a", "b", "c", "d"], "choice", "string"),
"x2__p2": Parameter("x2__p2", [10, 11, 12], "choice", "int"),
"x2__p3": Parameter("x2__p3", "lol", "constant", "string"),
}
rules = [ChildRule(applied_to = ["x2__p2"], parent = "x2__p1", value = ["a"])]
space = Space(scenario = start, sampler = sampler, rules = rules)
assert(space.next_params(history=[("Model", None), ("x2", None), ("x2__p1", "a")])[0] in ["x2__p2", "x2__p3"])
assert(space.has_finite_child(history=[("Model", None), ("x2", None), ("x2__p1", "b")])[1] == 0)
assert(space.has_finite_child(history=[("Model", None), ("x2", None), ("x2__p1", "a")])[1] > 0)
def test_sample(self):
def a_func(): return 0
def b_func(): return 0
def c_func(): return 0
x1 = WorkflowListTask(is_ordered=False, name ="x1", tasks = ["x1__p1", "x1__p2"])
x2 = WorkflowListTask(is_ordered=True, name ="x2", tasks = ["x2__p1", "x2__p2"])
start = WorkflowChoiceScenario(name ="Model", scenarios=[x1, x2])
sampler = { "x1__p1": Parameter("x1__p1", [0, 1], "uniform", "float"),
"x1__p2": Parameter("x1__p2", [1, 2, 3, 4, 5, 6, 7], "choice", "int"),
"x2__p1": Parameter("x2__p1", ["a", "b", "c", "d"], "choice", "string"),
"x2__p2": Parameter("x2__p2", [a_func, b_func, c_func], "choice", "func"),
}
space = Space(scenario = start, sampler = sampler)
for i in range(10):
v = space.sample("x1__p1")
assert(v >= 0)
assert(v <= 1)
assert(space.sample("x1__p2") in [1, 2, 3, 4, 5, 6, 7])
assert(space.sample("x2__p1") in ["a", "b", "c", "d"])
assert(space.sample("x2__p2") in [a_func, b_func, c_func])
def test_playout(self):
def a_func(): return 0
def b_func(): return 0
def c_func(): return 0
x1 = WorkflowListTask(is_ordered=False, name ="x1", tasks = ["x1__p1", "x1__p2"])
x2 = WorkflowListTask(is_ordered=True, name ="x2", tasks = ["x2__p1", "x2__p2"])
start = WorkflowChoiceScenario(name ="Model", scenarios=[x1, x2])
sampler = { "x1__p1": Parameter("x1__p1", [0, 1], "uniform", "float"),
"x1__p2": Parameter("x1__p2", [1, 2, 3, 4, 5, 6, 7], "choice", "int"),
"x2__p1": Parameter("x2__p1", ["a", "b", "c", "d"], "choice", "string"),
"x2__p2": Parameter("x2__p2", [a_func, b_func, c_func], "choice", "func"),
}
space = Space(scenario = start, sampler = sampler)
for i in range(10):
space.playout(history = [("Model", None)])
def test_has_finite_child(self):
def a_func(): return 0
def b_func(): return 0
def c_func(): return 0
x1 = WorkflowListTask(is_ordered=False, name ="x1", tasks = ["x1__p1", "x1__p2"])
x2 = WorkflowListTask(is_ordered=True, name ="x2", tasks = ["x2__p1", "x2__p2"])
start = WorkflowChoiceScenario(name ="Model", scenarios=[x1, x2])
sampler = { "x1__p1": Parameter("x1__p1", [0, 1], "uniform", "float"),
"x1__p2": Parameter("x1__p2", [1, 2, 3, 4, 5, 6, 7], "choice", "int"),
"x2__p1": Parameter("x2__p1", ["a", "b", "c", "d"], "choice", "string"),
"x2__p2": Parameter("x2__p2", [a_func, b_func, c_func], "choice", "func"),
}
space = Space(scenario = start, sampler = sampler)
assert(space.has_finite_child(history = [("Model", None)]) == (False, 2))
assert(space.has_finite_child(history = [("Model", None), ("x1", None)]) == (False, 17))
assert(space.has_finite_child(history = [("Model", None), ("x1", None), ("x1__p1", 0.5)]) == (False, 7))
assert(space.has_finite_child(history = [("Model", None), ("x1", None), ("x1__p1", 0.5), ("x1__p2", 1)]) == (False, 0))
assert(space.has_finite_child(history = [("Model", None), ("x2", None)]) == (False, 4))
assert(space.has_finite_child(history = [("Model", None), ("x2", None), ("x2__p1", "a")]) == (False, 3))
assert(space.has_finite_child(history = [("Model", None), ("x2", None), ("x2__p1", "c"), ("x2__p2", c_func)]) == (False, 0))
``` |
{
"source": "JohnKurian/TableGPT",
"score": 3
} |
#### File: JohnKurian/TableGPT/preprocess.py
```python
import time
import os
import string
import queue
import encoder
from tqdm import tqdm
import sys
# bpe vocab
enc = encoder.get_encoder("117M")
# “#”
field_empty = 2
eos = 50256
def join_box(list_in):
"""
Filters empty fields, combines multiple values into same field
Args:
list_in: list of field value pairs
Returns:
List of tuples of (field_name, (value1, value2, ...))
"""
out_list = []
current_name = ""
current_value = ""
for each_item in list_in:
field_name = each_item.split(":")[0]
field_value = each_item.split(":")[1]
if field_name == "":
continue
if not field_name[-1].isdigit():
if field_value != "<none>":
out_list.append((field_name, field_value))
continue
field_name = "_".join(field_name.split("_")[:-1])
if field_name != current_name:
if current_name != "":
# remove none value
if current_value.strip() != "<none>":
out_list.append((current_name, current_value.strip()))
current_name = ""
current_value = ""
current_name = field_name
current_value += (field_value + " ")
if current_value.strip() != "<none>":
out_list.append((current_name, current_value.strip()))
sorted_by_second = sorted(out_list, key=lambda tup: len(tup[1].split(" ")), reverse=True)
return out_list, sorted_by_second
def load_dem_map(file_in):
# TODO
"""
    recursively load the nationality / demonym map
    Args:
        file_in: path to the demonyms csv (comma-separated term pairs per line)
    Returns:
        dict mapping each term to the list of all transitively connected terms
"""
dem_map = {}
with open(file_in) as f:
for line in f:
line_list = line.strip().lower().split(",")
if line_list[0] not in dem_map:
dem_map[line_list[0]] = []
if line_list[1] not in dem_map[line_list[0]]:
dem_map[line_list[0]].append(line_list[1])
if line_list[1] not in dem_map:
dem_map[line_list[1]] = []
if line_list[0] not in dem_map[line_list[1]]:
dem_map[line_list[1]].append(line_list[0])
final_res_map = {}
for each_con in dem_map:
res_con = []
q = queue.Queue()
q.put(each_con)
while not q.empty():
con = q.get()
if con in res_con:
continue
res_con.append(con)
if con in dem_map:
for each_sub in dem_map[con]:
q.put(each_sub)
final_res_map[each_con] = res_con
return final_res_map
def fuzzy_match_rep(source, substring, field_name):
# TODO
"""
    Fuzzily replace a field value inside the summary with its field mask token.
    Args:
        source: summary string
        substring: field value to look for
        field_name: field whose <field_name> token replaces the matched span
    Returns:
        the summary with any sufficiently overlapping span replaced
"""
this_value = substring
out_summary = source
this_value_list_raw = this_value.split(" ")
out_summary_list = out_summary.split(" ")
# print this_value_list
# print out_summary_list
this_value_list = []
for token in this_value_list_raw:
if not(token in string.punctuation) \
and token != "(" \
and token != ")" \
and token != "-lsb-" \
and token != "-rsb-":
this_value_list.append(token)
if len(this_value_list) == 0:
return out_summary
num_consist = 0
min_index = len(out_summary_list) + 1
max_index = -1
for token in this_value_list:
if token in out_summary_list:
num_consist += 1
this_ind = out_summary_list.index(token)
if this_ind < min_index:
min_index = this_ind
if this_ind > max_index:
max_index = this_ind
# print num_consist
# print min_index
# print max_index
if float(num_consist) / len(this_value_list) > 0.4:
if max_index - min_index <= 2 * len(this_value_list):
### regard as match
to_replace = " ".join(out_summary_list[min_index:max_index+1])
replace_len = len(to_replace.split(" "))
if out_summary.startswith(to_replace):
out_summary = out_summary.replace(to_replace + " ", ("<" + field_name + "> ") * replace_len)
else:
out_summary = out_summary.replace(" " + to_replace + " ", " " + ("<" + field_name + "> ") * replace_len)
return out_summary
def gen_mask_field_pos(dem_file, in_summary, in_box, out_field, out_pos, out_rpos):
"""
Mask out the values in the summary by the corresponding fields
Args:
dem_file: demonymns file
in_summary: str, summary file
in_box: str, box file
out_field: masked summary
out_pos: summary with field position values
out_rpos: summary with reversed field position values
Returns:
None
"""
### load nationality demonyms.csv
dem_map = load_dem_map(dem_file)
with open(in_box) as f:
lines_box = f.readlines()
with open(in_summary) as f:
lines_summary = f.readlines()
out_s = open(out_field, "w")
out_p = open(out_pos, "w")
out_rp = open(out_rpos, "w")
for box, summary in tqdm(zip(lines_box, lines_summary)):
box = box.replace("-lrb-", "(")
box = box.replace("-rrb-", ")")
box_list = box.strip().split("\t")
box_out_list, box_field_list = join_box(box_list)
summary = summary.replace("-lrb-", "(")
summary = summary.replace("-rrb-", ")")
tem_summary = summary.strip()
out_summary = summary.strip()
tem_summary_list = tem_summary.split(" ")
out_pos, out_rpos, out_field = [], [], []
out_pos_bpe, out_rpos_bpe, out_field_bpe = [], [], []
out_bpe, _ = enc.encode(summary.strip())
out_bpe_len = len(out_bpe)
for ind in range(out_bpe_len):
out_pos_bpe.append(0)
out_rpos_bpe.append(0)
for ind in range(out_bpe_len):
out_field_bpe.append('#')
for ind in range(len(tem_summary_list)):
out_pos.append(0)
out_rpos.append(0)
for ind in range(len(tem_summary_list)):
out_field.append('#')
for (this_name, this_value) in box_field_list:
this_value_dict = {}
this_pos_bpe_dict = {}
prev = 1
for ind, each_token in enumerate(this_value.split(" ")):
# if each_token not in this_value_dict:
this_value_dict[each_token] = ind + 1
if this_name != "name":
each_token = " " + each_token
else:
if ind != 0:
each_token = " " + each_token
bpe_tokens, bpe_tokens_original = enc.encode(each_token)
# (start ind, len)
this_pos_bpe_dict[ind + 1] = (prev, len(bpe_tokens))
prev += len(bpe_tokens)
if this_name == "name":
bpe_value = this_value
else:
bpe_value = " " + this_value
bpe_tokens, bpe_tokens_original = enc.encode(bpe_value)
this_value_bpe_len = len(bpe_tokens)
this_value_list_len = len(this_value.split(" "))
if " " + this_value + " " in out_summary:
out_summary = out_summary.replace(" " + this_value + " ", " " + ("<" + this_name + "> ") * this_value_list_len)
# name
elif out_summary.startswith(this_value + " "):
out_summary = out_summary.replace(this_value + " ", ("<" + this_name + "> ") * this_value_list_len)
# nationality
elif this_value in dem_map:
this_value_list = dem_map[this_value]
for this_value in this_value_list:
this_value_list_len = len(this_value.split(" "))
if " " + this_value + " " in out_summary:
out_summary = out_summary.replace(" " + this_value + " ", " " + ("<" + this_name + "> ") * this_value_list_len)
else:
                # separate nationality
is_dem_match = 0
this_value_list = this_value.split(" , ")
if len(this_value_list) > 1:
for each_con in this_value_list:
if " " + each_con + " " in out_summary and each_con in dem_map:
each_con_len = len(each_con.split(" "))
out_summary = out_summary.replace(" " + each_con + " ", " " + ("<" + this_name + "> ") * each_con_len)
is_dem_match = 1
break
if each_con in dem_map:
this_con_list = dem_map[each_con]
for this_con in this_con_list:
if " " + this_con + " " in out_summary:
this_con_len = len(this_con.split(" "))
this_con_len = len(this_con.split(" "))
out_summary = out_summary.replace(" " + this_con + " ", " " + ("<" + this_name + "> ") * this_con_len)
is_dem_match = 1
break
if is_dem_match:
continue
out_summary = fuzzy_match_rep(out_summary, this_value, this_name)
assert len(out_summary.split(" ")) == len(tem_summary_list)
for ind, token in enumerate(out_summary.split(" ")):
if token == "<" + this_name + ">":
out_field[ind] = this_name
ori_token = tem_summary_list[ind]
if ori_token in this_value_dict:
out_pos[ind] = this_value_dict[ori_token]
out_rpos[ind] = this_value_list_len - (out_pos[ind] - 1)
# convert to bpe
ori_token_bpe = ori_token
if ind != 0:
ori_token_bpe = " " + ori_token
if ind > 0:
past = tem_summary_list[:ind]
past = " ".join(past)
bpe_past, _ = enc.encode(past)
past_len = len(bpe_past)
else:
past_len = 0
bpe_tokens, bpe_tokens_original = enc.encode(ori_token_bpe)
for it in range(len(bpe_tokens)):
out_field_bpe[past_len + it] = this_name
if ori_token in this_value_dict:
bpe_pos_start, bpe_pos_len = this_pos_bpe_dict[out_pos[ind]]
for it in range(bpe_pos_len):
start = bpe_pos_start + it
end = this_value_bpe_len - (start - 1)
if start > 30:
start = 30
if end > 30:
end = 30
if past_len + it >= len(out_pos_bpe):
this_id = past_len
else:
this_id = past_len + it
out_pos_bpe[this_id] = start
out_rpos_bpe[this_id] = end
bpe_tokens, bpe_tokens_original = enc.encode(summary.strip())
bpe_test = " ".join(bpe_tokens_original)
assert len(out_summary.split(" ")) == len(tem_summary_list)
assert len(out_field) == len(tem_summary_list)
assert len(tem_summary_list) == len(out_pos)
assert len(tem_summary_list) == len(out_rpos)
assert len(out_field_bpe) == len(bpe_tokens)
assert len(out_pos_bpe) == len(bpe_tokens)
assert len(out_rpos_bpe) == len(bpe_tokens)
out_s.write(" ".join(out_field_bpe) + "\n")
out_p.write(" ".join([str(tmp) for tmp in out_pos_bpe]) + "\n")
out_rp.write(" ".join([str(tmp) for tmp in out_rpos_bpe]) + "\n")
out_s.close()
out_p.close()
out_rp.close()
def gen_context(subdir):
"""
Process box data to use as input to GPT
Args:
subdir: str, root path
Returns:
None
"""
boxes = []
context = []
for split in ["train", "valid", "test"]:
boxes.append(os.path.join(subdir, "original_data", split + ".box"))
context.append(os.path.join(subdir, "processed_data", split, split + ".context"))
avg_len = 0
num = 0
for ind, fboxes in enumerate(boxes):
box = open(fboxes, "r").read().strip().split('\n')
context_out = open(context[ind], "w")
for ib in box:
ib = ib.replace("-lrb-", "(")
ib = ib.replace("-rrb-", ")")
item = ib.split('\t')
box_out_list, _ = join_box(item)
write_line = []
for (this_name, this_value) in box_out_list:
if '<none>' in this_value:
continue
to_write = ""
if this_name == "name":
# for humans
if domain == "humans":
to_write = this_value + " ,"
# to_write = "name ,"
# for books
if domain == "books":
to_write = "title : " + this_value + " ,"
# for songs
if domain == "songs":
to_write = "song name : " + this_value + " ,"
else:
write_value = " " + this_value
write_name = " " + this_name.replace("_", " ")
to_write = write_name + " :" + write_value + " ,"
tokens, tokens_original = enc.encode(to_write)
write_line.extend(tokens)
avg_len += len(write_line)
num += 1
context_out.write(" ".join([str(tmp) for tmp in write_line]) + "\n")
context_out.close()
print(float(avg_len) / num)
def split_infobox(subdir):
"""
extract box content, field type and position information from infoboxes from original_data
*.box.val is the box content (token)
*.box.lab is the field type for each token
    *.box.pos is the position counted from the beginning of a field
"""
bwfile = []
bffile = []
bpfile = []
boxes = []
for split in ['train', 'test', 'valid']:
bwfile.append(os.path.join(subdir, 'processed_data', split, split + '.box.val'))
bffile.append(os.path.join(subdir, 'processed_data', split, split + '.box.lab'))
bpfile.append(os.path.join(subdir, 'processed_data', split, split + '.box.pos'))
boxes.append(os.path.join(subdir, 'original_data', split + '.box'))
mixb_word, mixb_label, mixb_pos = [], [], []
for fboxes in boxes:
box = open(fboxes, "r").read().strip().split('\n')
box_word, box_label, box_pos = [], [], []
for ib in box:
ib = ib.replace("-lrb-", "(")
ib = ib.replace("-rrb-", ")")
box_single_word, box_single_label, box_single_pos = [], [], []
item = ib.split('\t')
box_out_list, _ = join_box(item)
for (this_name, this_value) in box_out_list:
if '<none>' in this_value:
continue
if this_name != "name":
this_value = " " + this_value
tokens, tokens_original = enc.encode(this_value)
for ind, each_token in enumerate(tokens_original):
box_single_word.append(each_token)
box_single_label.append(this_name)
box_single_pos.append(ind + 1 if ind + 1<=30 else 30)
box_word.append(box_single_word)
box_label.append(box_single_label)
box_pos.append(box_single_pos)
mixb_word.append(box_word)
mixb_label.append(box_label)
mixb_pos.append(box_pos)
for k, m in enumerate(mixb_word):
with open(bwfile[k], "w+") as h:
for items in m:
for sens in items:
h.write(str(sens) + " ")
h.write('\n')
for k, m in enumerate(mixb_label):
with open(bffile[k], "w+") as h:
for items in m:
for sens in items:
h.write(str(sens) + " ")
h.write('\n')
for k, m in enumerate(mixb_pos):
with open(bpfile[k], "w+") as h:
for items in m:
for sens in items:
h.write(str(sens) + " ")
h.write('\n')
def reverse_pos(subdir):
"""
get the position counted from the end of a field
Args:
subdir: str, root directory
Returns:
None
"""
bpfile = []
bwfile = []
for split in ['train', 'test', 'valid']:
bpfile.append(os.path.join(subdir, 'processed_data', split, split + '.box.pos'))
bwfile.append(os.path.join(subdir, 'processed_data', split, split + '.box.rpos'))
for k, pos in enumerate(bpfile):
box = open(pos, "r").read().strip().split('\n')
reverse_pos = []
for bb in box:
pos = bb.split()
tmp_pos = []
single_pos = []
for p in pos:
if int(p) == 1 and len(tmp_pos) != 0:
single_pos.extend(tmp_pos[::-1])
tmp_pos = []
tmp_pos.append(p)
single_pos.extend(tmp_pos[::-1])
reverse_pos.append(single_pos)
with open(bwfile[k], 'w+') as bw:
for item in reverse_pos:
bw.write(" ".join(item) + '\n')
def check_generated_box(subdir):
"""
Check len of input data matches
Args:
subdir: str, root path
Returns:
None
"""
ftrain = []
ftest = []
fvalid = []
for fp in [".box.val", ".box.lab", ".box.pos", ".box.rpos"]:
ftrain.append(os.path.join(subdir, 'processed_data', "train", "train" + fp))
ftest.append(os.path.join(subdir, 'processed_data', "test", "test" + fp))
fvalid.append(os.path.join(subdir, 'processed_data', "valid", "valid" + fp))
for case in [ftrain, ftest, fvalid]:
vals = open(case[0], 'r').read().strip().split('\n')
labs = open(case[1], 'r').read().strip().split('\n')
poses = open(case[2], 'r').read().strip().split('\n')
rposes = open(case[3], 'r').read().strip().split('\n')
assert len(vals) == len(labs)
assert len(poses) == len(labs)
assert len(rposes) == len(poses)
for val, lab, pos, rpos in zip(vals, labs, poses, rposes):
vval = val.strip().split(' ')
llab = lab.strip().split(' ')
ppos = pos.strip().split(' ')
rrpos = rpos.strip().split(' ')
if len(vval) != len(llab) or len(llab) != len(ppos) or len(ppos) != len(rrpos):
print(case)
print(val)
print(len(vval))
print(len(llab))
print(len(ppos))
print(len(rrpos))
assert len(vval) == len(llab)
assert len(llab) == len(ppos)
assert len(ppos) == len(rrpos)
def split_summary_for_rouge(subdir):
"""
    Write each valid and test summary into its own file (for ROUGE evaluation)
    Args:
        subdir: str, root path
Returns:
"""
bpfile = []
bwfile = []
for split in ["valid", "test"]:
bpfile.append(os.path.join(subdir, 'original_data', split + '.summary'))
bwfile.append(os.path.join(subdir, 'processed_data', split, split + '_split_for_rouge'))
for i, fi in enumerate(bpfile):
fread = open(fi, 'r')
k = 0
for line in fread:
with open(bwfile[i] + '/gold_summary_' + str(k), 'w') as sw:
sw.write(line.strip() + '\n')
k += 1
fread.close()
def table2id(subdir, merge_field_vocab, dem_file):
"""
Main pre-processing script that creates masked summaries, writes out tokenized field, value,
summary and masked summary
Args:
        subdir: str, root path
        merge_field_vocab: str, path to the merged field vocabulary file
        dem_file: str, path to the demonyms csv file
Returns:
None
"""
fvals = []
flabs = []
fsums = []
fvals2id = []
flabs2id = []
fsums2id = []
f_local_vocab = []
f_decoder_field = []
f_decoder_field_id = []
f_decoder_pos = []
f_decoder_rpos = []
boxes = []
for split in ["train", "test", "valid"]:
fvals.append(os.path.join(subdir, 'processed_data', split, split + '.box.val'))
flabs.append(os.path.join(subdir, 'processed_data', split, split + '.box.lab'))
fsums.append(os.path.join(subdir, 'original_data', split + '.summary'))
fvals2id.append(os.path.join(subdir, 'processed_data', split, split + '.box.val.id'))
flabs2id.append(os.path.join(subdir, 'processed_data', split, split + '.box.lab.id'))
fsums2id.append(os.path.join(subdir, 'processed_data', split, split + '.summary.id'))
f_local_vocab.append(os.path.join(subdir, 'processed_data', split, split + '_local_oov.txt'))
f_decoder_field.append(os.path.join(subdir, 'processed_data', split, split + '_summary_field.txt'))
f_decoder_field_id.append(os.path.join(subdir, 'processed_data', split, split + '_summary_field_id.txt'))
f_decoder_pos.append(os.path.join(subdir, 'processed_data', split, split + '_summary_pos.txt'))
f_decoder_rpos.append(os.path.join(subdir, 'processed_data', split, split + '_summary_rpos.txt'))
boxes.append(os.path.join(subdir, 'original_data', split + '.box'))
# write field to word mapping
key_map = dict()
key_map['#'] = 0
cnt = 1
with open(merge_field_vocab, "r") as v:
for line in v:
key = line.strip().split()[0]
key_map[key] = cnt
cnt += 1
key2id = key_map
id2key = {value: key for key, value in key_map.items()}
print(len(key_map))
# add for field id to word group mapping
keyid2wordlist = dict()
for i in range(0, len(id2key)):
if i == 0:
bpe_in = id2key[i].replace("_", " ")
else:
bpe_in = " " + id2key[i].replace("_", " ")
bpe_tokens, bpe_token_original = enc.encode(bpe_in)
keyid2wordlist[i] = bpe_tokens
if len(keyid2wordlist[i]) > 3:
keyid2wordlist[i] = keyid2wordlist[i][:3]
else:
extended = 3 - len(keyid2wordlist[i])
keyid2wordlist[i] += ([field_empty] * extended)
field2word_file = os.path.join(subdir, "processed_data", "field2word.txt")
with open(field2word_file, "w") as f:
for each_id in keyid2wordlist:
f.write(str(each_id) + "\t" + " ".join([str(tmp) for tmp in keyid2wordlist[each_id]]) + "\n")
# write out field data tokens
for k, ff in enumerate(flabs):
fi = open(ff, 'r')
fo = open(flabs2id[k], 'w')
for line in fi:
items = line.strip().split()
# print (items)
res_items = []
for key in items:
if key in key2id:
res_items.append(str(key2id[key]))
else:
res_items.append("0")
fo.write(" ".join(res_items) + '\n')
fi.close()
fo.close()
# gen field masked summary
for k, (fs, fb) in enumerate(zip(fsums, boxes)):
gen_mask_field_pos(dem_file, fs, fb, f_decoder_field[k], f_decoder_pos[k], f_decoder_rpos[k])
# write out masked summary tokens
for k, ff in enumerate(f_decoder_field):
fi = open(ff, 'r')
fo = open(f_decoder_field_id[k], 'w')
for line in fi:
items = line.strip().split()
res_items = []
for key in items:
if key in key2id:
res_items.append(str(key2id[key]))
else:
res_items.append("0")
fo.write(" ".join(res_items) + '\n')
fi.close()
fo.close()
# write out summary, value tokens
for k, (fs, fv) in enumerate(zip(fsums, fvals)):
fsum = open(fs)
fsumo = open(fsums2id[k], 'w')
fval = open(fv)
fvalo = open(fvals2id[k], 'w')
lines_sum = fsum.readlines()
lines_val = fval.readlines()
for line_sum, line_val in zip(lines_sum, lines_val):
line_val_list = line_val.strip().split()
res_val_list = []
for bpe_token in line_val_list:
if bpe_token in enc.encoder:
res_val_list.append(str(enc.encoder[bpe_token]))
else:
res_val_list.append(str(enc.encoder["#"]))
# res_val_list = [str(enc.encoder[bpe_token]) for bpe_token in line_val_list]
fvalo.write(" ".join(res_val_list) + "\n")
line_sum = line_sum.strip()
line_sum = line_sum.replace("-lrb-", "(")
line_sum = line_sum.replace("-rrb-", ")")
res_sum_list, _ = enc.encode(line_sum)
fsumo.write(" ".join([str(tmp) for tmp in res_sum_list]) + "\n")
fsumo.close()
fvalo.close()
def get_train_vocab_bpe(subdir):
"""
get train vocab of gpt data. return the mask
Args:
subdir: str, root path
Returns:
None
"""
summary_in = os.path.join(subdir, 'original_data', 'train.summary')
box_in = os.path.join(subdir, 'original_data', 'train.box')
out_vocab = os.path.join(subdir, 'processed_data', 'vocab_local.txt')
vocab = []
enc = encoder.get_encoder("117M")
with open(summary_in) as f:
for line in f:
line = line.strip()
tokens, tokens_original = enc.encode(line)
for token in tokens:
if token not in vocab:
vocab.append(token)
with open(box_in) as f:
for line in f:
line_list = line.strip().split("\t")
out_list, sorted_by_second = join_box(line_list)
for (this_name, this_value) in out_list:
bpe_in = " " + this_name.replace("_", " ")
tokens, tokens_original = enc.encode(bpe_in)
for token in tokens:
if token not in vocab:
vocab.append(token)
if this_name != "name":
bpe_in = " " + this_value
else:
bpe_in = this_value
tokens, tokens_original = enc.encode(bpe_in)
for token in tokens:
if token not in vocab:
vocab.append(token)
if field_empty not in vocab:
vocab.append(field_empty)
if eos not in vocab:
vocab.append(eos)
print(len(vocab))
res_mask = []
for ind in range(0, 50257):
if ind in vocab:
res_mask.append(str(1))
else:
res_mask.append(str(0))
with open(out_vocab, "w") as f:
f.write(" ".join(res_mask))
def preprocess(subdir, merge_field_vocab, dem_file):
"""
We use a triple <f, p+, p-> to represent the field information of a token in the specific field.
    p+ and p- are the positions of the token in that field counted from the beginning and the end of the field.
    For example, for a field (birthname, <NAME>) in an infobox, we represent the field as
(Jurgis, <birthname, 1, 2>) & (Mikelatitis, <birthname, 2, 1>)
"""
print("extracting token, field type and position info from original data ...")
time_start = time.time()
split_infobox(subdir)
reverse_pos(subdir)
duration = time.time() - time_start
print("extract finished in %.3f seconds" % float(duration))
    print("splitting test and valid summaries for ROUGE evaluation ...")
time_start = time.time()
split_summary_for_rouge(subdir)
duration = time.time() - time_start
print("split finished in %.3f seconds" % float(duration))
print("turning words and field types to ids ...")
time_start = time.time()
table2id(subdir, merge_field_vocab, dem_file)
duration = time.time() - time_start
print("idlization finished in %.3f seconds" % float(duration))
print("get vocab for train set")
get_train_vocab_bpe(subdir)
print("generate prefix table")
gen_context(subdir)
def make_dirs(subdir):
"""
    Make directories
Args:
subdir: Root directory
Returns:
None
"""
os.mkdir(os.path.join(subdir, "processed_data"))
os.mkdir(os.path.join(subdir, "processed_data", "train"))
os.mkdir(os.path.join(subdir, "processed_data", "test"))
os.mkdir(os.path.join(subdir, "processed_data", "valid"))
os.mkdir(os.path.join(subdir, "processed_data", "test", "test_split_for_rouge"))
os.mkdir(os.path.join(subdir, "processed_data", "valid", "valid_split_for_rouge"))
if __name__ == '__main__':
root_path = sys.argv[1]
domain = sys.argv[2]
subdir = os.path.join(root_path, domain)
dem_file = os.path.join(root_path, "demonyms.csv")
merge_field_vocab = os.path.join(root_path, "human_books_songs_films_field_vocab.txt")
make_dirs(subdir)
preprocess(subdir, merge_field_vocab, dem_file)
check_generated_box(subdir)
print("check done")
``` |
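A minimal driver sketch for the pipeline above, mirroring its `__main__` block. The paths are placeholders, the directory layout is inferred from the `os.path.join` calls, and `domain` is assigned on the module because `gen_context` reads it as a global that normally only exists when the file is run as a script. Importing the module also loads the GPT-2 "117M" BPE encoder, so those files must be available.
```python
# Hedged usage sketch; all paths are placeholders.
import os
import preprocess as pp  # assumed module name for the file above

root_path = "/path/to/data"   # holds demonyms.csv and the merged field vocab
pp.domain = "humans"          # gen_context also understands "books" and "songs"
subdir = os.path.join(root_path, pp.domain)  # <root>/<domain>/original_data/*.box|*.summary

pp.make_dirs(subdir)          # creates the processed_data/ folder tree
pp.preprocess(subdir,
              os.path.join(root_path, "human_books_songs_films_field_vocab.txt"),  # merge_field_vocab
              os.path.join(root_path, "demonyms.csv"))                             # dem_file
pp.check_generated_box(subdir)
```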
{
"source": "john-kurkowski/git-subtree-update",
"score": 3
} |
#### File: git-subtree-update/git_subtree_remote/command.py
```python
import os
import click
import git
from .diff import print_diverged
from .diff import print_subtree_diff
from .diff import print_up_to_date
from .local import validate_subtrees
from .remote import rate_limit_find_subtree_remote
def validate_subtree_remotes(local_repo, is_all, prefixes):
try:
subtrees = validate_subtrees(local_repo, is_all, prefixes)
except ValueError as verr:
raise click.BadParameter(verr.message)
rate_limited_find = rate_limit_find_subtree_remote()
with click.progressbar(subtrees, label='Finding subtree remotes') as progressbar:
return [
rate_limited_find(subtree)
for subtree in progressbar
]
@click.group()
def cli():
pass
@cli.command()
@click.option(
'is_all', '--all',
is_flag=True,
help='''Update all subtrees in the repo.''',
)
@click.argument('prefixes', 'prefix', nargs=-1)
def diff(is_all, prefixes):
'''Diff the given subtrees. Divines their remote from the given prefix.
Prompts the user when there are multiple possibilities.'''
local_repo = git.Repo(os.getcwd())
subtree_remotes = validate_subtree_remotes(local_repo, is_all, prefixes)
print_subtree_diff(subtree_remotes)
@cli.command()
@click.option(
'is_all', '--all',
is_flag=True,
help='''Update all subtrees in the repo.''',
)
@click.option(
'squash', '--squash',
is_flag=True,
help='Pass through `git subtree --squash ...`',
)
@click.argument('prefixes', 'prefix', nargs=-1)
def pull(is_all, squash, prefixes):
'''Add or update the given subtrees. Divines their remote from the given
prefix. Prompts the user when there are multiple possibilities.'''
local_repo = git.Repo(os.getcwd())
subtree_remotes = validate_subtree_remotes(local_repo, is_all, prefixes)
failures = []
updating_label = 'Updating {} subtree(s)'.format(len(subtree_remotes))
with click.progressbar(subtree_remotes, label=updating_label) as progressbar:
for remote in progressbar:
subtree_args = [remote.repo['git_url'], 'master']
subtree_kwargs = {'prefix': remote.subtree.prefix}
if squash:
subtree_kwargs['squash'] = True
if not remote.subtree.exists:
subtree_args.insert(0, 'add')
local_repo.git.subtree(*subtree_args, **subtree_kwargs)
elif remote.is_diverged:
click.echo('') # Newline after surrounding progress bar
print_diverged(remote)
click.echo('Skipping...')
failures.append(remote)
elif remote.is_ahead:
subtree_args.insert(0, 'pull')
local_repo.git.subtree(*subtree_args, **subtree_kwargs)
else:
click.echo('') # Newline after surrounding progress bar
print_up_to_date(remote)
click.echo(local_repo.git.status())
if failures:
click.echo()
raise click.ClickException('some subtrees were skipped: {}'.format(
', '.join(subtree.repo['html_url'] for subtree in failures)
))
``` |
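Because both subcommands are ordinary `click` commands, they can be exercised in-process with `click.testing.CliRunner`. The sketch below assumes the import path shown in the file header and a hypothetical subtree prefix; the commands themselves still need to run inside a git repository whose subtrees have GitHub remotes.
```python
# Hedged sketch: 'vendor/some-library' is a made-up prefix.
from click.testing import CliRunner
from git_subtree_remote.command import cli

runner = CliRunner()

# Compare every known subtree prefix against its divined remote.
result = runner.invoke(cli, ["diff", "--all"])
print(result.output)

# Add or update a single subtree, squashing the pulled history.
result = runner.invoke(cli, ["pull", "--squash", "vendor/some-library"])
print(result.output)
```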
{
"source": "johnkussack/mxoknowledge",
"score": 2
} |
#### File: mxoknowledge/parsers/main.py
```python
from mxo_packmap_extractor import *
from mxo_dss_parser import *
from mxo_prop_model_parser import *
from mxo_eprf_model_parser import *
from mxo_iprf_model_parser import *
from mxo_moa_model_parser import *
from mxo_mga_model_parser import *
output_folder = "./output/"
def save_file(data, full_path, source_extensions ,desired_ext):
full_path = full_path.lower().split("/")[-1]
for s in source_extensions:
full_path = full_path.replace(".%s" % s, ".%s" % desired_ext)
with open(output_folder + full_path,"wb+") as f:
f.write(data)
def parse_textures():
dss_textures = [
"../res/img/texture.txa",
"../res/img/succubus_int.txa",
"../res/img/sphinx_metal.txa",
"../res/img/dt_apt03_livflrdtl.txa",
"../res/img/tm_0000_0000.txb",
"../res/img/SWmap_slums_barrens.txb",
"../res/img/tutorial_v2_minimap.txb",
"./extracted/metro_map.txa",
]
for t in dss_textures:
raw_data = DssParser.parse_file(t)
save_file(raw_data, t, ["txa", "txb"], "dds")
""" ###### Run the prop exporter ####"""
def parse_props():
props = [
"../res/prop/van_wheelless.prop",
"../res/prop/succubus.prop",
"./extracted/fire_ext01.prop",
]
for p in props:
raw_data = PropParser.parse_file(p)
save_file(bytearray(raw_data,"ascii"), p, ["prop"], "obj")
def parse_eprfs():
eprfs = ["../res/building/sl_church01.eprf",
"../res/building/street_1l_10x10end.eprf",
"./extracted/sl_projects02_s09_big_facade.eprf"
]
for e in eprfs:
raw_data = EprfParser.parse_file(e)
save_file(bytearray(raw_data,"ascii"), e, ["eprf"], "obj")
def parse_iprfs():
iprfs = ["../res/building/null_01x01_ext_wood.iprf",
]
for i in iprfs:
raw_data = IprfParser.parse_file(i)
save_file(bytearray(raw_data,"ascii"), i, ["iprf"], "obj")
def parse_moas():
moas = ["./extracted/temp_switch.moa",
]
for m in moas:
raw_data = MoaParser.parse_file(m)
#save_file(bytearray(raw_data,"ascii"), m, ["moa"], "txt")
def parse_mgas():
mgas = ["./extracted/MorphHead.mga",
]
for m in mgas:
raw_data = MgaParser.parse_file(m)
save_file(bytearray(raw_data,"ascii"), m, ["mga"], "obj")
def extract_files():
packmap_save_path = "../res/packmap_save/packmap_save.lta"
packmap_path = "../packmaps/"
key = "02000040"
output_path = "./extracted/"
PackmapExtractor.extract_file(key, packmap_save_path,packmap_path,output_path)
extract_files()
#parse_textures()
#parse_props()
#parse_eprfs()
#parse_moas()
#parse_mgas()
```
#### File: mxoknowledge/parsers/mxo_packmap_extractor.py
```python
from binarywalker import *
class PackmapExtractor:
def __init__(self):
pass
@staticmethod
def extract_file(key, packmap_save_location,packmap_folder, dest_full_path):
data = ""
try:
with open(packmap_save_location) as f:
data = f.read().split("\n")
except Exception as error:
print("Failed to open the packmap location", error)
key = key.lower()
lines = [k for k in data if key in k.lower()]
if len(lines) == 0:
print("Entry not found for key '%s'" % key)
return
line = lines[0].split("\"")
file_name = line[1].split("/")[-1]
file_location = line[3].split("/")[-1]
offset, size = [int(k,16) for k in line[4].lstrip().split(" ")]
try:
raw_data = bytearray()
with open(packmap_folder+file_location,"rb") as f:
f.seek(offset)
raw_data+= f.read(size)
with open(dest_full_path+file_name,"wb+") as f:
f.write(raw_data)
except Exception as error:
print("Failed to read the packmap file contents", error)
return raw_data
``` |
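The parsing in `extract_file` implies a `packmap_save.lta` line layout in which the resource name and the archive file are the first and second quoted fields, followed by two hex numbers for offset and size. The walk-through below uses an invented sample line shaped only to satisfy that indexing; the real separator text and key format are assumptions.
```python
# Invented sample line; only its quoting and trailing hex fields matter here.
sample = '\t"resource/worlds/metro_map.txa" = "packmaps/worlds.pkb" 000123AB 0000FF00'

parts = sample.split('"')
file_name = parts[1].split("/")[-1]        # -> 'metro_map.txa'
file_location = parts[3].split("/")[-1]    # -> 'worlds.pkb'
offset, size = [int(k, 16) for k in parts[4].lstrip().split(" ")]
print(file_name, file_location, hex(offset), hex(size))
```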
{
"source": "johnkylecooper/hot-billboard-bot",
"score": 4
} |
#### File: johnkylecooper/hot-billboard-bot/my_twitter_bot.py
```python
import os
import time
from bot_utility import get_song_info
print('this is my twitter bot')
# Collect Keys
keys = open("keys.txt").read().split()
import twitter
api = twitter.Api(consumer_key=keys[0],
consumer_secret=keys[1],
access_token_key=keys[2],
access_token_secret=keys[3])
def tweet():
song, lnk, year, H = get_song_info()
file1 = open("songs.txt")
past_songs = file1.read()
file1.close()
while song[H[1]] in past_songs:
print('discovered repeat...')
print('generating new song info...')
song, lnk, year, H = get_song_info()
status = 'Entering the Billboard Hot 100 top-ten singles on '\
+ song[H[0]] + ', ' + str(year) + ', "' + song[H[1]] + '" by '\
+ song[H[2]] + ' reached its peak on '\
+ song[H[4]] + ', ' + str(year) + '. ' + lnk
print(status)
# Write to a text file that lists the already posted songs
file1 = open("songs.txt","a")
file1.write(song[H[1]]+'\n')
file1.close()
api.PostUpdate(status)
tweet()
``` |
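The bot expects its credentials in a whitespace-separated `keys.txt`; the order below is inferred from how `keys[0]` through `keys[3]` are passed to `twitter.Api`. A small sanity-check sketch:
```python
# Hedged sketch: checks keys.txt holds the four credentials in the assumed order.
EXPECTED = ["consumer_key", "consumer_secret", "access_token_key", "access_token_secret"]

keys = open("keys.txt").read().split()
assert len(keys) == 4, "keys.txt must contain exactly four whitespace-separated tokens"
for name, value in zip(EXPECTED, keys):
    print(name, "->", value[:4] + "...")  # show only a prefix, never log full secrets
```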
{
"source": "JOHNKYON/kaggle_HCDS",
"score": 3
} |
#### File: kaggle_HCDS/sample_script/manual_feature_engineering.py
```python
import pandas as pd
import numpy as np
import gc
import lightgbm as lgb
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
plt.style.use('fivethirtyeight')
# Plot the distribution of a variable colored by value of the target
def kde_target(var_name, df):
    # Calculate the correlation coefficient between the new variable and the target
corr = df['TARGET'].corr(df[var_name])
# Calculate medians for repaid vs not repaid
avg_repaid = df.loc[df['TARGET'] == 0, var_name].median()
avg_not_repaid = df.loc[df['TARGET'] == 1, var_name].median()
plt.figure(figsize=(12, 6))
# Plot the distribution for target == 0 and target == 1
sns.kdeplot(df.loc[df['TARGET'] == 0, var_name].dropna(), label='TARGET == 0')
sns.kdeplot(df.loc[df['TARGET'] == 1, var_name].dropna(), label='TARGET == 1')
    # Label the plot
plt.xlabel(var_name)
plt.ylabel('Density')
plt.title('%s Distribution' % var_name)
plt.legend()
# Print out the correlation
print('The correlation between %s and the TARGET is %0.4f' % (var_name, corr))
# Print out average values
print('Median value for loan that was not repaid =\t %0.4f' % avg_not_repaid)
print('Median value for loan that was repaid =\t %0.4f' % avg_repaid)
def agg_numeric(df, group_var, df_name):
"""Aggregates the numeric values in a dataframe. This can
be used to create features for each instance of the grouping variable.
Parameters
--------
df (dataframe):
the dataframe to calculate the statistics on
group_var (string):
the variable by which to group df
df_name (string):
the variable used to rename the columns
Return
--------
agg (dataframe):
a dataframe with the statistics aggregated for
all numeric columns. Each instance of the grouping variable will have
the statistics (mean, min, max, sum; currently supported) calculated.
The columns are also renamed to keep track of features created.
"""
# Remove id variables other than grouping variable
for col in df:
if col != group_var and 'SK_ID' in col:
df = df.drop(columns=col)
group_ids = df[group_var]
numeric_df = df.select_dtypes('number')
numeric_df[group_var] = group_ids
# Group by the specified variable and calculate the statistics
agg = numeric_df.groupby(group_var).agg(['count', 'mean', 'max', 'min', 'sum']).reset_index()
# Need to create new column names
columns = [group_var]
    # Iterate through the variable names
for var in agg.columns.levels[0]:
# Skip the grouping variable
if var != group_var:
# Iterate through the stat names
for stat in agg.columns.levels[1][:-1]:
# Make a new column name for the variable and stat
columns.append('%s_%s_%s' % (df_name, var, stat))
agg.columns = columns
return agg
def count_categorical(df, group_var, df_name):
"""Computes counts and normalized counts for each observation
of `group_var` of each unique category in every categorical variable
Parameters
--------
df : dataframe
The dataframe to calculate the value counts for.
group_var : string
The variable by which to group the dataframe. For each unique
value of this variable, the final dataframe will have one row
df_name : string
Variable added to the front of column names to keep track of columns
Return
--------
categorical : dataframe
A dataframe with counts and normalized counts of each unique category in every categorical variable
with one row for every unique value of the `group_var`.
"""
# Select the categorical columns
categorical = pd.get_dummies(df.select_dtypes('object'))
# Make sure to put the identifying id on the column
categorical[group_var] = df[group_var]
# Groupby the group var and calculate the sum and mean
categorical = categorical.groupby(group_var).agg(['sum', 'mean'])
column_names = []
# Iterate through the columns in level 0
for var in categorical.columns.levels[0]:
# Iterate through the stats in level 1
for stat in ['count', 'count_norm']:
# Make a new column name
column_names.append('%s_%s_%s' % (df_name, var, stat))
categorical.columns = column_names
return categorical
# Read in new copies of all the dataframes
train = pd.read_csv('../data/application_train.csv')
bureau = pd.read_csv('../data/bureau.csv')
bureau_balance = pd.read_csv('../data/bureau_balance.csv')
bureau_counts = count_categorical(bureau, group_var='SK_ID_CURR', df_name='bureau')
bureau_agg = agg_numeric(bureau.drop(columns=['SK_ID_BUREAU']),
group_var='SK_ID_CURR', df_name='bureau')
bureau_balance_counts = count_categorical(bureau_balance, group_var='SK_ID_BUREAU', df_name='bureau_balance')
bureau_balance_agg = agg_numeric(bureau_balance, group_var='SK_ID_BUREAU', df_name='bureau_balance')
# Dataframe grouped by the loan
bureau_by_loan = bureau_balance_agg.merge(bureau_balance_counts,
right_index = True, left_on = 'SK_ID_BUREAU', how='outer')
# Merge to include the SK_ID_CURR
bureau_by_loan = bureau[['SK_ID_BUREAU', 'SK_ID_CURR']].merge(bureau_by_loan, on='SK_ID_BUREAU', how='left')
# Aggregate the stats for each client
bureau_balance_by_client = agg_numeric(bureau_by_loan.drop(columns=['SK_ID_BUREAU']),
group_var='SK_ID_CURR', df_name='client')
original_features = list(train.columns)
print('Original Number of Features: ', len(original_features))
# Merge with the value counts of bureau
train = train.merge(bureau_counts, on='SK_ID_CURR', how='left')
# Merge with the stats of bureau
train = train.merge(bureau_agg, on='SK_ID_CURR', how='left')
# Merge with the monthly information grouped by client
train = train.merge(bureau_balance_by_client, on='SK_ID_CURR', how='left')
new_features = list(train.columns)
print('Number of features using previous loans from other institutions data: ', len(new_features))
# Function to calculate missing values by column
def missing_values_table(df):
# Total missing values
mis_val = df.isnull().sum()
# Percentage of missing values
mis_val_percent = 100 * df.isnull().sum() / len(df)
# Make a table with the results
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
# Rename the columns
mis_val_table_ren_columns = mis_val_table.rename(
columns={0: 'Missing Values', 1: '% of Total Values'})
# Sort the table by percentage of missing descending
mis_val_table_ren_columns = mis_val_table_ren_columns[
mis_val_table_ren_columns.iloc[:, 1] != 0].sort_values(
'% of Total Values', ascending=False).round(1)
# Print some summary information
print("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
"There are " + str(mis_val_table_ren_columns.shape[0]) +
" columns that have missing values.")
# Return the dataframe with missing information
return mis_val_table_ren_columns
missing_train = missing_values_table(train)
missing_train_vars = list(missing_train.index[missing_train['% of Total Values'] > 90])
# Read in the test dataframe
test = pd.read_csv('../data/application_test.csv')
# Merge with the value counts of bureau
test = test.merge(bureau_counts, on='SK_ID_CURR', how='left')
# Merge with the stats of bureau
test = test.merge(bureau_agg, on='SK_ID_CURR', how='left')
# Merge with the value counts of bureau balance
test = test.merge(bureau_balance_by_client, on='SK_ID_CURR', how='left')
print('Shape of Testing Data: ', test.shape)
train_labels = train['TARGET']
# Align the dataframes, this will remove the 'TARGET' column
train, test = train.align(test, join='inner', axis=1)
train['TARGET'] = train_labels
print('Training Data Shape: ', train.shape)
print('Testing Data Shape ', test.shape)
missing_test = missing_values_table(test)
missing_test_vars = list(missing_test.index[missing_test['% of Total Values'] > 90])
len(missing_test_vars)
missing_columns = list(set(missing_test_vars+missing_train_vars))
print('There are %d columns with more than 90%% missing in either the training or testing data.'
% len(missing_columns))
# Drop the missing columns
train = train.drop(columns=missing_columns)
test = test.drop(columns=missing_columns)
train.to_csv('train_bureau_raw.csv', index=False)
test.to_csv('test_bureau_raw.csv', index=False)
# Calculate all correlations in dataframe
corrs = train.corr()
corrs = corrs.sort_values('TARGET', ascending=False)
# Set the threshold
threshold = 0.8
# Empty dictionary to hold correlated variables
above_threshold_vars = {}
# For each column, record the variables that are above the threshold
for col in corrs:
above_threshold_vars[col] = list(corrs.index[corrs[col] > threshold])
# Track columns to remove and columns already examined
cols_to_remove = []
cols_seen = []
cols_to_remove_paire = []
# Iterate through columns and correlated columns
for key, value in above_threshold_vars.items():
# Keep track of columns already examined
cols_seen.append(key)
for x in value:
if x == key:
            continue
else:
# Only want to remove on in a pair
if x not in cols_seen:
cols_to_remove.append(x)
cols_to_remove_paire.append(key)
cols_to_remove = list(set(cols_to_remove))
print('Number of columns to remove: ', len(cols_to_remove))
train_corrs_removed = train.drop(columns=cols_to_remove)
test_corrs_removed = test.drop(columns=cols_to_remove)
print('Training Corrs Removed Shape: ', train_corrs_removed.shape)
print('Test Corrs Removed Shape: ', test_corrs_removed.shape)
train_corrs_removed.to_csv('train_bureau_corrs_removed.csv', index=False)
test_corrs_removed.to_csv('test_bureau_corrs_removed.csv', index=False)
def model(features, test_features, encoding='ohe', n_folds=5):
"""Train and test a light gradient boosting model using
cross validation.
Parameters
--------
features (pd.DataFrame):
dataframe of training features to use
for training a model. Must include the TARGET column.
test_features (pd.DataFrame):
dataframe of testing features to use
for making predictions with the model.
encoding (str, default = 'ohe'):
method for encoding categorical variables. Either 'ohe' for one-hot encoding or 'le' for integer label encoding
n_folds (int, default = 5): number of folds to use for cross validation
Return
--------
submission (pd.DataFrame):
dataframe with `SK_ID_CURR` and `TARGET` probabilities
predicted by the model.
feature_importances (pd.DataFrame):
dataframe with the feature importances from the model.
valid_metrics (pd.DataFrame):
dataframe with training and validation metrics (ROC AUC) for each fold and overall.
"""
# Extract the ids
train_ids = features['SK_ID_CURR']
test_ids = test_features['SK_ID_CURR']
# Extract the labels for training
labels = features['TARGET']
# Remove the ids and target
features = features.drop(columns=['SK_ID_CURR', 'TARGET'])
test_features = test_features.drop(columns=['SK_ID_CURR'])
# One Hot Encoding
if encoding == 'ohe':
features = pd.get_dummies(features)
test_features = pd.get_dummies(test_features)
# Align the dataframes by the columns
features, test_features = features.align(test_features, join='inner', axis=1)
# No categorical indices to record
cat_indices = 'auto'
# Integer label encoding
elif encoding == 'le':
# Create a label encoder
label_encoder = LabelEncoder()
        # List for storing categorical indices
cat_indices = []
        # Iterate through each column
for i, col in enumerate(features):
if features[col].dtype == 'object':
# Map the categorical features to integers
features[col] = label_encoder.fit_transform(
np.array(features[col].astype(str)).reshape((-1,)))
test_features[col] = label_encoder.transform(
np.array(test_features[col].astype(str)).reshape((-1,)))
# Record the categorical indices
cat_indices.append(i)
    # Catch error if label encoding scheme is not valid
else:
raise ValueError("Encoding must be either 'ohe' or 'le'")
print('Training Data Shape: ', features.shape)
print('Testing Data Shape: ', test_features.shape)
# Extract feature names
feature_names = list(features.columns)
# Convert to np arrays
features = np.array(features)
test_features = np.array(test_features)
# Create the kfold object
k_fold = KFold(n_splits=n_folds, shuffle=False, random_state=50)
# Empty array for feature importances
features_importance_values = np.zeros(len(feature_names))
# Empty array for test predictions
test_predictions = np.zeros(test_features.shape[0])
# Empty array for out of fold validation predictions
out_of_fold = np.zeros(features.shape[0])
# Lists for recording validation and training scores
valid_scores = []
train_scores = []
# Iterate through each fold
for train_indices, valid_indices in k_fold.split(features):
# Training data for the fold
train_features, train_labels = features[train_indices], labels[train_indices]
# Validation data for the fold
valid_features, valid_labels = features[valid_indices], labels[valid_indices]
# Create the model
model = lgb.LGBMClassifier(n_estimators=10000, objective='binary',
class_weight='balanced', learning_rate=0.05,
reg_alpha=0.1, reg_lambda=0.1,
subsample=0.8, n_jobs=-1, random_state=50)
# Train the model
model.fit(train_features, train_labels, eval_metric='auc',
eval_set=[(valid_features, valid_labels), (train_features, train_labels)],
eval_names=['valid', 'train'], categorical_feature=cat_indices,
early_stopping_rounds=100, verbose=200)
# Record the best iteration
best_iteration = model.best_iteration_
        # Record the feature importances
features_importance_values += model.feature_importances_ / k_fold.n_splits
        # Make predictions
        test_predictions += model.predict_proba(test_features, num_iteration=best_iteration)[:, 1] / k_fold.n_splits
        # Record out-of-fold predictions on the held-out indices (used for the overall validation AUC below)
        out_of_fold[valid_indices] = model.predict_proba(valid_features, num_iteration=best_iteration)[:, 1]
# Record the best score
valid_score = model.best_score_['valid']['auc']
        train_score = model.best_score_['train']['auc']
valid_scores.append(valid_score)
train_scores.append(train_score)
# Clean up memory
gc.enable()
del model, train_features, valid_features
gc.collect()
# Make the submission dataframe
submission = pd.DataFrame({'SK_ID_CURR' : test_ids, 'TARGET': test_predictions})
# Make the feature importance dataframe
feature_importances = pd.DataFrame({'feature': feature_names, 'importance': features_importance_values})
# Overall validation score
valid_auc = roc_auc_score(labels, out_of_fold)
# Add the overall scores to the metrics
valid_scores.append(valid_auc)
train_scores.append(np.mean(train_scores))
# Needed for creating dataframe of validation scores
fold_names = list(range(n_folds))
fold_names.append('overall')
# Dataframe of validation scores
metrics = pd.DataFrame({'fold': fold_names,
'train': train_scores,
'valid': valid_scores})
print('Model prediction finished.')
return submission, feature_importances, metrics
def plot_feature_importances(df):
"""
Plot importances returned by a model. This can work with any measure of
feature importance provided that higher importance is better.
Args:
        df (dataframe): feature importances. Must have the features in a column
            called `feature` and the importances in a column called `importance`
Returns:
shows a plot of the 15 most importance features
df (dataframe): feature importances sorted by importance (highest to lowest)
with a column for normalized importance
"""
# Sort features according to importance
df = df.sort_values('importance', ascending= False).reset_index()
# Normalize the feature importances to add up to one
df['importance_normalized'] = df['importance'] / df['importance'].sum()
# Make a horizontal bar chart of feature importances
plt.figure(figsize=(10, 6))
ax = plt.subplot()
# Need to reverse the index to plot most important on top
ax.barh(list(reversed(list(df.index[:15]))),
df['importance_normalized'].head(15),
align='center', edgecolor='k')
# Set the yticks and labels
ax.set_yticks(list(reversed(list(df.index[:15]))))
ax.set_yticklabels(df['feature'].head(15))
# Plot labeling
plt.xlabel('Normalized Importance')
plt.title('Feature Importances')
plt.show()
return df
train_control = pd.read_csv('../data/application_train.csv')
test_control = pd.read_csv('../data/application_test.csv')
submission, fi, metrics = model(train_control, test_control)
submission.to_csv('lightGBM.csv', index=False)
```
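To make the two aggregation helpers above concrete, here is a toy sketch with invented column names and values showing the shape of what `agg_numeric` and `count_categorical` return for a bureau-style table keyed by `SK_ID_CURR` (it assumes both functions are in scope, e.g. imported from the script above):
```python
# Toy illustration only; data and column names are made up.
import pandas as pd

toy = pd.DataFrame({
    "SK_ID_CURR": [1, 1, 2],
    "SK_ID_BUREAU": [10, 11, 12],          # dropped automatically by agg_numeric
    "AMT_CREDIT_SUM": [1000.0, 500.0, 2000.0],
    "CREDIT_ACTIVE": ["Active", "Closed", "Active"],
})

toy_agg = agg_numeric(toy, group_var="SK_ID_CURR", df_name="toy")
toy_counts = count_categorical(toy, group_var="SK_ID_CURR", df_name="toy")

# One row per client, with count/mean/max/min/sum for each numeric column ...
print(toy_agg.columns.tolist())
# ... and count / normalized count for each category of every categorical column.
print(toy_counts.columns.tolist())
```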
#### File: kaggle_HCDS/scripts/feature_engineer.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils.feature_engineer import count_categorical
from utils.feature_engineer import agg_numeric
from utils.feature_engineer import missing_values_table
from utils.feature_engineer import light_gbm
import warnings
warnings.filterwarnings("ignore")
def feature_engineer(train, test, bureau, bureau_balance, credit_card_balance,
installments_payments, pos_cash_balance, previous_application):
"""
This function read all the data from the competition and do manual feature engineer to it.
:param train:
:param test:
:param bureau:
:param bureau_balance:
:param credit_card_balance:
:param installments_payments:
:param pos_cash_balance:
:param previous_application:
:return: (Dataframe) train
(Datafarme) test
"""
bureau_counts = count_categorical(bureau, group_var='SK_ID_CURR', df_name='bureau')
bureau_agg = agg_numeric(bureau.drop(columns=['SK_ID_BUREAU']), group_var='SK_ID_CURR', df_name='bureau')
bureau_balance_counts = count_categorical(bureau_balance, group_var='SK_ID_BUREAU', df_name='bureau_balance')
bureau_balance_agg = agg_numeric(bureau_balance, group_var='SK_ID_BUREAU', df_name='bureau_balance')
credit_card_balance_counts = count_categorical(credit_card_balance,
group_var='SK_ID_CURR', df_name='credit_card_balance')
credit_card_balance_agg = agg_numeric(credit_card_balance.drop(columns=['SK_ID_PREV']),
group_var='SK_ID_CURR', df_name='credit_card_balance')
# Reason: Installments_payments_counts table contains no object value.
# installments_payments_counts = count_categorical(installments_payments,
# group_var='SK_ID_CURR', df_name='installments_payments')
installments_payments_agg = agg_numeric(installments_payments.drop(columns=['SK_ID_PREV']),
group_var='SK_ID_CURR', df_name='installments_payments')
pos_cash_balance_counts = count_categorical(pos_cash_balance, group_var='SK_ID_CURR', df_name='pos_cash_balance')
pos_cash_balance_agg = agg_numeric(pos_cash_balance.drop(columns=['SK_ID_PREV']),
group_var='SK_ID_CURR', df_name='pos_cash_balance')
previous_application_counts = count_categorical(previous_application,
group_var='SK_ID_CURR', df_name='previous_application_counts')
previous_application_agg = agg_numeric(previous_application.drop(columns=['SK_ID_PREV']),
group_var='SK_ID_CURR', df_name='previous_application')
# Dataframe grouped by the loan
bureau_by_loan = bureau_balance_agg.merge(bureau_balance_counts,
right_index=True, left_on='SK_ID_BUREAU', how='outer')
# Merge to include the SK_ID_CURR
bureau_by_loan = bureau[['SK_ID_BUREAU', 'SK_ID_CURR']].merge(bureau_by_loan, on='SK_ID_BUREAU', how='left')
# Aggregate the stats for each client
bureau_balance_by_client = agg_numeric(bureau_by_loan.drop(columns=['SK_ID_BUREAU']),
group_var='SK_ID_CURR', df_name='client')
original_features = list(train.columns)
print('Original Number of Features: ', len(original_features))
# TODO: We can also first deal with pos_cash_balance and credit card balance before merge.
# Merge with the value counts of bureau
train = train.merge(bureau_counts, on='SK_ID_CURR', how='left')
# Merge with the stats of bureau
train = train.merge(bureau_agg, on='SK_ID_CURR', how='left')
# Merge with the monthly information grouped by client
train = train.merge(bureau_balance_by_client, on='SK_ID_CURR', how='left')
# Merge with credit card balance counts
train = train.merge(credit_card_balance_counts, on='SK_ID_CURR', how='left')
# Merge with credit card balance agg
train = train.merge(credit_card_balance_agg, on='SK_ID_CURR', how='left')
# Merge with installments payments agg
train = train.merge(installments_payments_agg, on='SK_ID_CURR', how='left')
# Merge with pos_cash_balance counts
train = train.merge(pos_cash_balance_counts, on='SK_ID_CURR', how='left')
# Merge with pos_cash_balance agg
train = train.merge(pos_cash_balance_agg, on='SK_ID_CURR', how='left')
# Merge with previous_application counts
train = train.merge(previous_application_counts, on='SK_ID_CURR', how='left')
# Merge with previous application agg
train = train.merge(previous_application_agg, on='SK_ID_CURR', how='left')
new_features = list(train.columns)
print('Number of features using previous loans from other institutions data: ', len(new_features))
missing_train = missing_values_table(train)
missing_train_vars = list(missing_train.index[missing_train['% of Total Values'] > 90])
# Test
# Merge with the value counts of bureau
test = test.merge(bureau_counts, on='SK_ID_CURR', how='left')
# Merge with the stats of bureau
test = test.merge(bureau_agg, on='SK_ID_CURR', how='left')
# Merge with the monthly information grouped by client
test = test.merge(bureau_balance_by_client, on='SK_ID_CURR', how='left')
# Merge with credit card balance counts
test = test.merge(credit_card_balance_counts, on='SK_ID_CURR', how='left')
# Merge with credit card balance agg
test = test.merge(credit_card_balance_agg, on='SK_ID_CURR', how='left')
# Merge with installments payments agg
test = test.merge(installments_payments_agg, on='SK_ID_CURR', how='left')
# Merge with pos_cash_balance counts
test = test.merge(pos_cash_balance_counts, on='SK_ID_CURR', how='left')
# Merge with pos_cash_balance agg
test = test.merge(pos_cash_balance_agg, on='SK_ID_CURR', how='left')
# Merge with previous_application counts
test = test.merge(previous_application_counts, on='SK_ID_CURR', how='left')
# Merge with previous application agg
test = test.merge(previous_application_agg, on='SK_ID_CURR', how='left')
print('Shape of Training Data: ', train.shape)
print('Shape of Testing Data: ', test.shape)
train_labels = train['TARGET']
# Align the dataframes, this will remove the 'TARGET' column
train, test = train.align(test, join='inner', axis=1)
train['TARGET'] = train_labels
print('Training Data Shape: ', train.shape)
print('Testing Data Shape ', test.shape)
missing_test = missing_values_table(test)
missing_test_vars = list(missing_test.index[missing_test['% of Total Values'] > 90])
len(missing_test_vars)
missing_columns = list(set(missing_test_vars + missing_train_vars))
print('There are %d columns with more than 90%% missing in either the training or testing data.'
% len(missing_columns))
# Drop the missing columns
train = train.drop(columns=missing_columns)
test = test.drop(columns=missing_columns)
train.to_csv('train_all_raw.csv', index=False)
test.to_csv('test_all_raw.csv', index=False)
# Calculate all correlations in dataframe
corrs = train.corr()
corrs = corrs.sort_values('TARGET', ascending=False)
# Set the threshold
threshold = 0.8
# Empty dictionary to hold correlated variables
above_threshold_vars = {}
# For each column, record the variables that are above the threshold
for col in corrs:
above_threshold_vars[col] = list(corrs.index[corrs[col] > threshold])
# Track columns to remove and columns already examined
cols_to_remove = []
cols_seen = []
cols_to_remove_paire = []
# Iterate through columns and correlated columns
for key, value in above_threshold_vars.items():
# Keep track of columns already examined
cols_seen.append(key)
for x in value:
if x == key:
                continue
else:
# Only want to remove on in a pair
if x not in cols_seen:
cols_to_remove.append(x)
cols_to_remove_paire.append(key)
cols_to_remove = list(set(cols_to_remove))
print('Number of columns to remove: ', len(cols_to_remove))
train_corrs_removed = train.drop(columns=cols_to_remove)
test_corrs_removed = test.drop(columns=cols_to_remove)
print('Training Corrs Removed Shape: ', train_corrs_removed.shape)
    print('Test Corrs Removed Shape: ', test_corrs_removed.shape)
train_corrs_removed.to_csv('train_all_corrs_removed.csv', index=False)
test_corrs_removed.to_csv('test_all_corrs_removed.csv', index=False)
return train_corrs_removed, test_corrs_removed
``` |
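A hedged sketch of wiring `feature_engineer` to the competition CSVs. The `../data/` layout mirrors the reads in the sample script above; the remaining file names follow the standard Home Credit release and the import path is assumed from the repository layout.
```python
import pandas as pd
from scripts.feature_engineer import feature_engineer  # assumed import path

data = "../data/"
train = pd.read_csv(data + "application_train.csv")
test = pd.read_csv(data + "application_test.csv")
bureau = pd.read_csv(data + "bureau.csv")
bureau_balance = pd.read_csv(data + "bureau_balance.csv")
credit_card_balance = pd.read_csv(data + "credit_card_balance.csv")
installments_payments = pd.read_csv(data + "installments_payments.csv")
pos_cash_balance = pd.read_csv(data + "POS_CASH_balance.csv")
previous_application = pd.read_csv(data + "previous_application.csv")

train_fe, test_fe = feature_engineer(train, test, bureau, bureau_balance,
                                     credit_card_balance, installments_payments,
                                     pos_cash_balance, previous_application)
print(train_fe.shape, test_fe.shape)
```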
{
"source": "JOHNKYON/Lab_models",
"score": 3
} |
#### File: Lab_models/src/t_SNE.py
```python
import numpy as np
from sklearn.manifold import TSNE
import codecs
__author__ = "JOHNKYON"
def plot_build(mtr):
"""
    Reduce high-dimensional data to 2-D points with t-SNE.
    :param mtr: np.ndarray, high-dimensional distance matrix
    :return: np.ndarray, 2-D point representation (also written to temp/points.txt)
"""
model = TSNE(n_components=2, random_state=0)
np.set_printoptions(suppress=True)
points = model.fit_transform(mtr)
output_file = codecs.open('temp/points.txt', 'wb', 'utf8')
for ele in points:
for x in ele:
output_file.write(str(x))
output_file.write('\t')
output_file.write('\n')
output_file.close()
return points
``` |
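A short usage sketch for `plot_build`: random data stands in for a real high-dimensional distance matrix, the import path is assumed from the file header, and `temp/` is created because the function writes `temp/points.txt` as a side effect.
```python
import os
import numpy as np
import matplotlib.pyplot as plt
from src.t_SNE import plot_build  # assumed import path

os.makedirs("temp", exist_ok=True)      # plot_build writes temp/points.txt
mtr = np.random.rand(100, 50)           # 100 samples in a 50-dimensional space
points = plot_build(mtr)                # (100, 2) array of embedded points
plt.scatter(points[:, 0], points[:, 1])
plt.show()
```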
{
"source": "JOHNKYON/Sora",
"score": 3
} |
#### File: Sora/SHAP/tree.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
class TreeExplainer:
"""
    SHAP explainer for tree ensembles.
    Supports sklearn DecisionTreeRegressor-style models (the model must expose a `tree_` attribute).
"""
def __init__(self, model, **kwargs):
self.tree = Tree(model.tree_)
# Preallocate space for the unique path data
depth = self.tree.max_depth + 2
s = (depth * (depth + 1)) // 2
self.feature_indexes = np.zeros(s, dtype=np.int32)
self.zero_fractions = np.zeros(s, dtype=np.float64)
self.one_fractions = np.zeros(s, dtype=np.float64)
self.pweights = np.zeros(s, dtype=np.float64)
def shap_values(self, x):
"""
        Compute the SHAP values for each feature.
        :param x: 1-D array for a single example, or 2-D array of shape (n_samples, n_features)
        :return: SHAP values per feature; the last entry/column holds the model's expected value (bias)
"""
if str(type(x)).endswith("DataFrame'>") or str(type(x)).endswith("Series'>"):
x = x.values
self.n_outputs = self.tree.values.shape[1]
# Only one example
if len(x.shape) == 1:
values = np.zeros((x.shape[0] + 1, self.n_outputs))
x_missing = np.zeros(x.shape[0], dtype=np.bool)
self.tree_shap(self.tree, x, x_missing, values)
if self.n_outputs == 1:
return values[:, 0]
else:
return [values[:, i] for i in range(self.n_outputs)]
# Other cases
else:
values = np.zeros((x.shape[0], x.shape[1] + 1, self.n_outputs))
x_missing = np.zeros(x.shape[1], dtype=np.bool)
for i in range(x.shape[0]):
self.tree_shap(self.tree, x[i, :], x_missing, values[i, :, :])
if self.n_outputs == 1:
return values[:, :, 0]
else:
return [values[:, :, i] for i in range(self.n_outputs)]
def tree_shap(self, tree, x, x_missing, values, condition=0, condition_feature=0):
"""
The algorithm to calculate the SHAP values
:param tree:
:param x:
:param x_missing:
:param values:
:return:
"""
if condition == 0:
values[-1, :] += tree.values[0, :]
# Start the recursive algorithm
tree_shap_recursive(tree.children_left, tree.children_right,
tree.children_default, tree.feature,
tree.threshold, tree.values, tree.node_sample_weight,
x, x_missing, values, 0, 0, self.feature_indexes,
self.zero_fractions, self.one_fractions, self.pweights,
1, 1, -1, condition, condition_feature, 1)
def tree_shap_recursive(children_left, children_right, children_default,
features, thresholds, tree_values,
node_sample_weight, x, x_missing, values, node_index, unique_depth,
parent_feature_indexes, parent_zero_fractions, parent_one_fractions,
parent_pweights, parent_zero_fraction, parent_one_fraction,
parent_feature_index, condition, condition_feature, condition_fraction):
"""
Recursive algorithm to calculate tree shap
    :param children_left:
:param children_right:
:param features:
    :param thresholds:
:param tree_values:
:param node_sample_weight:
:param x:
:param x_missing:
:param values:
:param node_index:
:param unique_depth:
:return:
"""
# Stop when there's no weight coming
if condition_fraction == 0:
return
# extend the unique path
feature_indexes = parent_feature_indexes[unique_depth + 1:]
feature_indexes[:unique_depth + 1] = parent_feature_indexes[:unique_depth + 1]
zero_fractions = parent_zero_fractions[unique_depth + 1:]
zero_fractions[:unique_depth + 1] = parent_zero_fractions[:unique_depth + 1]
one_fractions = parent_one_fractions[unique_depth + 1:]
one_fractions[:unique_depth + 1] = parent_one_fractions[:unique_depth + 1]
pweights = parent_pweights[unique_depth + 1:]
pweights[:unique_depth + 1] = parent_pweights[:unique_depth + 1]
if condition == 0 or condition_feature != parent_feature_index:
extend(
feature_indexes, zero_fractions, one_fractions, pweights,
unique_depth, parent_zero_fraction, parent_one_fraction, parent_feature_index
)
split_index = features[node_index]
# leaf node
if children_right[node_index] == -1:
for i in range(1, unique_depth + 1):
w = unwound_path_sum(feature_indexes, zero_fractions,
one_fractions, pweights, unique_depth, i)
values[feature_indexes[i], :] += w * (one_fractions[i] - zero_fractions[i]) \
* tree_values[node_index, :] * condition_fraction
# internal node
else:
# find which branch is "hot" (meaning x would follow it)
hot_index = 0
cleft = children_left[node_index]
cright = children_right[node_index]
if x_missing[split_index] == 1:
hot_index = children_default[node_index]
elif x[split_index] < thresholds[node_index]:
hot_index = cleft
else:
hot_index = cright
cold_index = (cright if hot_index == cleft else cleft)
w = node_sample_weight[node_index]
hot_zero_fraction = node_sample_weight[hot_index] / w
cold_zero_fraction = node_sample_weight[cold_index] / w
incoming_zero_fraction = 1.
incoming_one_fraction = 1.
# see if we have already split on this feature,
# if so we undo that split so we can redo it for this node
path_index = 0
while path_index <= unique_depth:
if feature_indexes[path_index] == split_index:
break
path_index += 1
if path_index != unique_depth + 1:
incoming_zero_fraction = zero_fractions[path_index]
incoming_one_fraction = one_fractions[path_index]
unwind(feature_indexes, zero_fractions, one_fractions,
pweights, unique_depth, path_index)
unique_depth -= 1
# divide up the condition_fraction among the recursive calls
hot_condition_fraction = condition_fraction
cold_condition_fraction = condition_fraction
if condition > 0 and split_index == condition_feature:
cold_condition_fraction = 0.
unique_depth -= 1
elif condition < 0 and split_index == condition_feature:
hot_condition_fraction *= hot_zero_fraction
cold_condition_fraction *= cold_zero_fraction
unique_depth -= 1
tree_shap_recursive(
children_left, children_right, children_default, features,
thresholds, tree_values, node_sample_weight,
x, x_missing, values, hot_index, unique_depth + 1,
feature_indexes, zero_fractions, one_fractions, pweights,
hot_zero_fraction * incoming_zero_fraction, incoming_one_fraction,
split_index, condition, condition_feature, hot_condition_fraction
)
tree_shap_recursive(
children_left, children_right, children_default, features,
thresholds, tree_values, node_sample_weight,
x, x_missing, values, cold_index, unique_depth + 1,
feature_indexes, zero_fractions, one_fractions, pweights,
cold_zero_fraction * incoming_zero_fraction, 0,
split_index, condition, condition_feature, cold_condition_fraction
)
def extend(feature_indexes, zero_fractions, one_fractions, pweights,
unique_depth, zero_fraction, one_fraction, feature_index):
feature_indexes[unique_depth] = feature_index
zero_fractions[unique_depth] = zero_fraction
one_fractions[unique_depth] = one_fraction
if unique_depth == 0:
pweights[unique_depth] = 1.
else:
pweights[unique_depth] = 0.
for i in range(unique_depth - 1, -1, -1):
pweights[i + 1] += one_fraction * pweights[i] * (i + 1.) / (unique_depth + 1.)
pweights[i] = zero_fraction * pweights[i] * (unique_depth - i) / (unique_depth + 1.)
def unwind(feature_indexes, zero_fractions, one_fractions, pweights,
unique_depth, path_index):
one_fraction = one_fractions[path_index]
zero_fraction = zero_fractions[path_index]
next_one_portion = pweights[unique_depth]
for i in range(unique_depth - 1, -1, -1):
if one_fraction != 0.:
tmp = pweights[i]
pweights[i] = next_one_portion * (unique_depth + 1.) / ((i + 1.) * one_fraction)
next_one_portion = tmp - pweights[i] * zero_fraction * (unique_depth - i) / (unique_depth + 1.)
else:
pweights[i] = (pweights[i] * (unique_depth + 1)) / (zero_fraction * (unique_depth - i))
for i in range(path_index, unique_depth):
feature_indexes[i] = feature_indexes[i + 1]
zero_fractions[i] = zero_fractions[i + 1]
one_fractions[i] = one_fractions[i + 1]
def unwound_path_sum(feature_indexes, zero_fractions, one_fractions, pweights, unique_depth, path_index):
one_fraction = one_fractions[path_index]
zero_fraction = zero_fractions[path_index]
next_one_portion = pweights[unique_depth]
total = 0
for i in range(unique_depth - 1, -1, -1):
if one_fraction != 0.:
tmp = next_one_portion * (unique_depth + 1.) / ((i + 1.) * one_fraction)
total += tmp
next_one_portion = pweights[i] - tmp * zero_fraction * ((unique_depth - i) / (unique_depth + 1.))
else:
total += (pweights[i] / zero_fraction) / ((unique_depth - i) / (unique_depth + 1.))
return total
class Tree:
"""
Tree wrapper for the SHAP explainer.
Supports sklearn.tree.DecisionTreeRegressor.
"""
def __init__(self, model):
self.children_left = model.children_left
self.children_right = model.children_right
self.children_default = self.children_left
self.feature = model.feature
self.threshold = model.threshold.astype(np.float64)
self.values = model.value[:, 0, :]
self.node_sample_weight = model.weighted_n_node_samples
self.max_depth = model.max_depth
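# Minimal usage sketch (hypothetical data): wraps a fitted sklearn
# DecisionTreeRegressor; only meant to illustrate the call pattern above and
# assumes this re-implementation mirrors the reference TreeSHAP behaviour.
if __name__ == "__main__":
    from sklearn.tree import DecisionTreeRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(200, 3)
    y = 2.0 * X[:, 0] + X[:, 1]
    model = DecisionTreeRegressor(max_depth=3).fit(X, y)
    explainer = TreeExplainer(model)
    phi = explainer.shap_values(X[0])   # one value per feature plus a bias term
    print(phi.shape)                    # expected: (4,)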
``` |
{
"source": "JohnL17/caluma",
"score": 3
} |
#### File: caluma/core/jexl.py
```python
from itertools import count
import pyjexl
from pyjexl.analysis import ValidatingAnalyzer
from pyjexl.exceptions import ParseError
from rest_framework import exceptions
class Cache:
"""Custom cache.
For JEXL expressions, we cannot use django's cache infrastructure, as the
cached objects are pickled. This won't work for parsed JEXL expressions, as
they contain lambdas etc.
"""
def __init__(self, max_size=2000, evict_to=1500):
self.max_size = max_size
self.evict_to = evict_to
self._cache = {}
self._mru = {}
self._mru_count = count()
def get_or_set(self, key, default):
if key in self._cache:
self._mru[key] = next(self._mru_count)
return self._cache[key]
ret = self._cache[key] = default()
self._mru[key] = next(self._mru_count)
if len(self._mru) > self.max_size:
self._evict()
return ret
def _evict(self):
to_purge = list(sorted(self._cache.keys(), key=lambda key: self._mru[key]))
# to_purge contains all keys, but we only want to evict the oldest
# ones
num_to_evict = len(to_purge) - self.evict_to
for key in to_purge[:num_to_evict]:
del self._cache[key]
del self._mru[key]
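# Minimal usage sketch (hypothetical expression strings, comments only):
#   cache = Cache(max_size=2000, evict_to=1500)
#   cache.get_or_set("answer > 3", lambda: "parsed")   # miss: the factory runs and the result is stored
#   cache.get_or_set("answer > 3", lambda: 1 / 0)      # hit: the factory is never called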
class JexlValidator(object):
def __init__(self, jexl):
self.jexl = jexl
def __call__(self, value):
errors = list(self.jexl.validate(value))
if errors:
raise exceptions.ValidationError(errors)
class JEXL(pyjexl.JEXL):
expr_cache = Cache()
def parse(self, expression):
parsed_expression = self.expr_cache.get_or_set(
expression, lambda: super(JEXL, self).parse(expression)
)
return parsed_expression
def analyze(self, expression, analyzer_class):
# some common shortcuts, no need to invoke JEXL engine for real
return super().analyze(expression, analyzer_class)
def validate(self, expression, ValidatingAnalyzerClass=ValidatingAnalyzer):
try:
for res in self.analyze(expression, ValidatingAnalyzerClass):
yield res
except ParseError as err:
yield str(err)
class ExtractTransformSubjectAnalyzer(ValidatingAnalyzer):
"""
Extract all subject values of the given transforms.
If no transforms are given, the subjects of all transforms are extracted.
"""
def __init__(self, config, transforms=[]):
self.transforms = transforms
super().__init__(config)
def visit_Transform(self, transform):
if not self.transforms or transform.name in self.transforms:
yield transform.subject.value
yield from self.generic_visit(transform)
```
#### File: core/tests/test_mutation.py
```python
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.db.models import IntegerField
from django.http import Http404
from rest_framework import exceptions
from rest_framework.serializers import CharField, Serializer
from .. import models, mutation, permissions, serializers, types, validations
from .fake_model import get_fake_model
def test_missing_mutation_serializer_class():
with pytest.raises(Exception) as exc:
class MyMutation(mutation.Mutation):
pass
assert str(exc.value) == "serializer_class is required for the Mutation"
def test_invalid_mutation_model_operations(db):
class MySerializer(serializers.ModelSerializer):
class Meta:
model = get_fake_model()
fields = "__all__"
with pytest.raises(Exception) as exc:
class MyMutation(mutation.Mutation):
class Meta:
serializer_class = MySerializer
model_operations = ["Add"]
assert "model_operations" in str(exc.value)
def test_invalid_mutation_update_mutate_and_get_payload(db, info):
FakeModel = get_fake_model(model_base=models.UUIDModel)
class FakeModelObjectType(types.DjangoObjectType):
class Meta:
model = FakeModel
@classmethod
def get_queryset(cls, queryset, info):
# enforce that nothing is visible
return queryset.none()
class MySerializer(serializers.ModelSerializer):
class Meta:
model = get_fake_model()
fields = "__all__"
class InvalidModelMutation(mutation.Mutation):
class Meta:
serializer_class = MySerializer
return_field_type = FakeModelObjectType
model_operations = ["update"]
with pytest.raises(Exception) as exc:
InvalidModelMutation.mutate_and_get_payload(None, info)
assert '"id" required' in str(exc.value)
def test_mutation_mutate_and_get_payload_without_model(info):
class MySerializer(Serializer):
name = CharField()
def create(self, validated_data):
return validated_data
class NoModelMutation(mutation.Mutation):
class Meta:
return_field_name = "test"
serializer_class = MySerializer
result = NoModelMutation.mutate_and_get_payload(None, info, name="test")
assert type(result) == NoModelMutation
def test_mutation_mutate_and_get_payload_without_permission(db, info):
class MySerializer(serializers.ModelSerializer):
class Meta:
model = get_fake_model()
fields = "__all__"
class NoPermission(permissions.BasePermission):
def has_permission(self, mutation, info):
return False
class MyMutation(mutation.Mutation):
permission_classes = [NoPermission]
class Meta:
serializer_class = MySerializer
with pytest.raises(exceptions.PermissionDenied):
MyMutation.mutate_and_get_payload(None, info)
def test_mutation_mutate_and_get_payload_without_object_permission(db, info):
FakeModel = get_fake_model()
instance = FakeModel.objects.create()
class FakeModelObjectType(types.DjangoObjectType):
class Meta:
model = FakeModel
class MySerializer(serializers.ModelSerializer):
class Meta:
model = FakeModel
fields = "__all__"
class NoObjectPermission(permissions.BasePermission):
def has_object_permission(self, mutation, info, instance):
return False
class MyMutation(mutation.Mutation):
permission_classes = [NoObjectPermission]
class Meta:
serializer_class = MySerializer
return_field_type = FakeModelObjectType
with pytest.raises(exceptions.PermissionDenied):
MyMutation.mutate_and_get_payload(None, info, id=str(instance.pk))
def test_mutation_mutate_and_get_payload_permission_classes_improperly_configured(
db, info
):
class MySerializer(serializers.ModelSerializer):
class Meta:
model = get_fake_model()
fields = "__all__"
class MyMutation(mutation.Mutation):
permission_classes = None
class Meta:
serializer_class = MySerializer
with pytest.raises(ImproperlyConfigured):
MyMutation.mutate_and_get_payload(None, info)
def test_mutation_mutate_and_get_payload_validation_classes_improperly_configured(
db, info
):
FakeModel = get_fake_model()
class MySerializer(serializers.ModelSerializer):
validation_classes = None
class Meta:
model = FakeModel
fields = "__all__"
class CustomNode(types.DjangoObjectType):
class Meta:
model = FakeModel
class MyMutation(mutation.Mutation):
class Meta:
serializer_class = MySerializer
with pytest.raises(ImproperlyConfigured):
MyMutation.mutate_and_get_payload(None, info)
def test_mutation_mutate_and_get_payload_validation_classes_custom_validation(
db, info, history_mock
):
FakeModel = get_fake_model(
model_base=models.UUIDModel, fields={"testnum": IntegerField(null=True)}
)
class CustomValidation(validations.BaseValidation):
def validate(self, mutation, data, info):
data["testnum"] = 1
return data
class MySerializer(serializers.ModelSerializer):
validation_classes = [CustomValidation]
class Meta:
model = FakeModel
fields = "__all__"
class CustomNode(types.DjangoObjectType):
class Meta:
model = FakeModel
class MyMutation(mutation.Mutation):
class Meta:
serializer_class = MySerializer
MyMutation.mutate_and_get_payload(None, info)
assert FakeModel.objects.first().testnum == 1
def test_user_defined_primary_key_get_serializer_kwargs_not_allowed(
db, info, history_mock
):
"""Test that user may not overwrite existing instance which is not visible."""
FakeModel = get_fake_model(model_base=models.SlugModel)
class FakeModelObjectType(types.DjangoObjectType):
class Meta:
model = FakeModel
@classmethod
def get_queryset(cls, queryset, info):
# enforce that nothing is visible
return queryset.none()
class MySerializer(serializers.ModelSerializer):
class Meta:
model = FakeModel
fields = "__all__"
class MyMutation(mutation.UserDefinedPrimaryKeyMixin, mutation.Mutation):
class Meta:
serializer_class = MySerializer
return_field_type = FakeModelObjectType
FakeModel.objects.create(slug="test")
with pytest.raises(Http404):
MyMutation.get_serializer_kwargs(None, info, slug="test")
def test_user_defined_primary_key_get_serializer_kwargs_update_not_allowed(
db, info, history_mock
):
FakeModel = get_fake_model(model_base=models.SlugModel)
class FakeModelObjectType(types.DjangoObjectType):
class Meta:
model = FakeModel
class MySerializer(serializers.ModelSerializer):
class Meta:
model = FakeModel
fields = "__all__"
class MyMutation(mutation.UserDefinedPrimaryKeyMixin, mutation.Mutation):
class Meta:
serializer_class = MySerializer
return_field_type = FakeModelObjectType
model_operations = ["create"]
FakeModel.objects.create(slug="test")
with pytest.raises(exceptions.ValidationError):
MyMutation.get_serializer_kwargs(None, info, slug="test")
def test_user_defined_primary_key_get_serializer_kwargs_create_not_allowed(db, info):
FakeModel = get_fake_model(model_base=models.SlugModel)
class FakeModelObjectType(types.DjangoObjectType):
class Meta:
model = FakeModel
class MySerializer(serializers.ModelSerializer):
class Meta:
model = FakeModel
fields = "__all__"
class MyMutation(mutation.UserDefinedPrimaryKeyMixin, mutation.Mutation):
class Meta:
serializer_class = MySerializer
return_field_type = FakeModelObjectType
model_operations = ["update"]
with pytest.raises(exceptions.ValidationError):
MyMutation.get_serializer_kwargs(None, info, slug="test")
```
#### File: core/tests/test_pagination.py
```python
import pytest
@pytest.mark.parametrize(
"first,last,before,after,has_next,has_previous",
[
(1, None, None, None, True, False),
(None, 1, None, None, False, True),
(None, None, None, None, False, False),
(None, None, None, "YXJyYXljb25uZWN0aW9uOjI=", False, True),
(None, None, "YXJyYXljb25uZWN0aW9uOjI=", None, True, False),
],
)
def test_has_next_previous(
db,
first,
last,
before,
after,
has_next,
has_previous,
schema_executor,
document_factory,
):
document_factory.create_batch(5)
query = """
query AllDocumentsQuery ($first: Int, $last: Int, $before: String, $after: String) {
allDocuments(first: $first, last: $last, before: $before, after: $after) {
pageInfo {
hasNextPage
hasPreviousPage
}
edges {
node {
id
}
}
}
}
"""
inp = {"first": first, "last": last, "before": before, "after": after}
result = schema_executor(query, variables=inp)
assert not result.errors
assert result.data["allDocuments"]["pageInfo"]["hasNextPage"] == has_next
assert result.data["allDocuments"]["pageInfo"]["hasPreviousPage"] == has_previous
```
#### File: caluma/form/factories.py
```python
from factory import Faker, LazyAttribute, Maybe, SubFactory, lazy_attribute
from ..core.factories import DjangoModelFactory
from . import models
AUTO_QUESTION_TYPES = [
t
for t in models.Question.TYPE_CHOICES
if t
not in [
models.Question.TYPE_STATIC,
models.Question.TYPE_FORM,
models.Question.TYPE_DYNAMIC_CHOICE,
models.Question.TYPE_DYNAMIC_MULTIPLE_CHOICE,
]
]
class FormFactory(DjangoModelFactory):
slug = Faker("slug")
name = Faker("multilang", faker_provider="name")
description = Faker("multilang", faker_provider="text")
meta = {}
is_published = False
is_archived = False
class Meta:
model = models.Form
class QuestionFactory(DjangoModelFactory):
slug = Faker("slug")
label = Faker("multilang", faker_provider="name")
type = Faker("word", ext_word_list=AUTO_QUESTION_TYPES)
is_required = "true"
is_hidden = "false"
configuration = {}
meta = {}
is_archived = False
format_validators = []
row_form = Maybe(
"is_table", yes_declaration=SubFactory(FormFactory), no_declaration=None
)
sub_form = Maybe(
"is_form", yes_declaration=SubFactory(FormFactory), no_declaration=None
)
static_content = Maybe(
"is_static",
yes_declaration=Faker("multilang", faker_provider="text"),
no_declaration=None,
)
data_source = Maybe(
"is_dynamic", yes_declaration="MyDataSource", no_declaration=None
)
class Meta:
model = models.Question
class Params:
is_table = LazyAttribute(lambda q: q.type == models.Question.TYPE_TABLE)
is_form = LazyAttribute(lambda q: q.type == models.Question.TYPE_FORM)
is_dynamic = LazyAttribute(
lambda q: q.type
in [
models.Question.TYPE_DYNAMIC_CHOICE,
models.Question.TYPE_DYNAMIC_MULTIPLE_CHOICE,
]
)
is_static = LazyAttribute(lambda q: q.type == models.Question.TYPE_STATIC)
class OptionFactory(DjangoModelFactory):
slug = Faker("slug")
label = Faker("multilang", faker_provider="name")
is_archived = False
meta = {}
class Meta:
model = models.Option
class QuestionOptionFactory(DjangoModelFactory):
option = SubFactory(OptionFactory)
question = SubFactory(QuestionFactory)
sort = 0
class Meta:
model = models.QuestionOption
class FormQuestionFactory(DjangoModelFactory):
form = SubFactory(FormFactory)
question = SubFactory(QuestionFactory)
sort = 0
class Meta:
model = models.FormQuestion
class DocumentFactory(DjangoModelFactory):
form = SubFactory(FormFactory)
family = None
meta = {}
class Meta:
model = models.Document
class FileFactory(DjangoModelFactory):
name = Faker("file_name")
class Meta:
model = models.File
class AnswerFactory(DjangoModelFactory):
question = SubFactory(QuestionFactory)
document = SubFactory(DocumentFactory)
meta = {}
@lazy_attribute
def value(self):
if (
self.question.type == models.Question.TYPE_MULTIPLE_CHOICE
or self.question.type == models.Question.TYPE_DYNAMIC_MULTIPLE_CHOICE
):
return [Faker("name").generate({}), Faker("name").generate({})]
elif self.question.type == models.Question.TYPE_FLOAT:
return Faker("pyfloat").generate({})
elif self.question.type == models.Question.TYPE_INTEGER:
return Faker("pyint").generate({})
elif self.question.type not in [
models.Question.TYPE_TABLE,
models.Question.TYPE_FILE,
models.Question.TYPE_DATE,
]:
return Faker("name").generate({})
return None
file = Maybe(
"is_file", yes_declaration=SubFactory(FileFactory), no_declaration=None
)
date = Maybe("is_date", yes_declaration=Faker("date"), no_declaration=None)
class Meta:
model = models.Answer
class Params:
is_file = LazyAttribute(lambda a: a.question.type == models.Question.TYPE_FILE)
is_date = LazyAttribute(lambda a: a.question.type == models.Question.TYPE_DATE)
class AnswerDocumentFactory(DjangoModelFactory):
answer = SubFactory(AnswerFactory)
document = SubFactory(DocumentFactory)
sort = 0
class Meta:
model = models.AnswerDocument
```
#### File: caluma/form/format_validators.py
```python
import re
from collections import namedtuple
from django.conf import settings
from django.utils.module_loading import import_string
from rest_framework.exceptions import ValidationError
from caluma.core.utils import translate_value
class BaseFormatValidator:
r"""Basic format validator class to be extended by any format validator implementation.
A custom format validator class could look like this:
```
>>> from caluma.form.format_validators import BaseFormatValidator
...
...
... class CustomFormatValidator(BaseFormatValidator):
... slug = "email"
... name = {"en": "E-mail", "de": "E-Mail"}
... regex = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
... error_msg = {"en": "Not an e-mail address", "de": "Keine E-Mail adresse"}
```
"""
def __init__(self):
if not all(
[self.slug, self.regex, self.name, self.error_msg]
): # pragma: no cover
raise NotImplementedError("Missing properties!")
def validate(self, value, document):
if not re.match(self.regex, value):
raise ValidationError(translate_value(self.error_msg))
class EMailFormatValidator(BaseFormatValidator):
slug = "email"
name = {"en": "E-mail", "de": "E-Mail", "fr": "Courriel"}
regex = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
error_msg = {
"en": "Please enter a valid Email address.",
"de": "Bitte geben Sie eine gültige E-Mail-Adresse ein.",
"fr": "Veuillez entrer une addresse e-mail valide.",
}
class PhoneNumberFormatValidator(BaseFormatValidator):
slug = "phone-number"
name = {"en": "Phone number", "de": "Telefonnummer", "fr": "numéro de téléphone"}
regex = r"^[\s\/\.\(\)-]*(?:\+|0|00)(?:[\s\/\.\(\)-]*\d[\s\/\.\(\)-]*){6,20}$"
error_msg = {
"en": "Please enter a valid phone number.",
"de": "Bitte geben Sie eine gültige Telefonnummer ein.",
"fr": "Veuillez entrer un numéro de téléphone valide.",
}
base_format_validators = [EMailFormatValidator, PhoneNumberFormatValidator]
FormatValidator = namedtuple("FormatValidator", ["slug", "name", "regex", "error_msg"])
def get_format_validators(include=None, dic=False):
"""Get all FormatValidators.
:param include: List of FormatValidators to include
:param dic: Should return a dict
:return: List of FormatValidator-objects if dic False otherwise dict
"""
format_validator_classes = [
import_string(cls) for cls in settings.FORMAT_VALIDATOR_CLASSES
] + base_format_validators
if include is not None:
format_validator_classes = [
fvc for fvc in format_validator_classes if fvc.slug in include
]
if dic:
return {ds.slug: ds for ds in format_validator_classes}
return [
FormatValidator(
slug=ds.slug,
name=translate_value(ds.name),
regex=ds.regex,
error_msg=translate_value(ds.error_msg),
)
for ds in format_validator_classes
]
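# Minimal usage sketch (hypothetical, assumes Django settings with
# FORMAT_VALIDATOR_CLASSES configured):
#   get_format_validators(include=["email"], dic=True)
#   # -> {"email": EMailFormatValidator}  (validator classes keyed by slug)
#   get_format_validators(include=["email"])
#   # -> [FormatValidator(slug="email", name=..., regex=..., error_msg=...)]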
```
#### File: form/tests/test_form.py
```python
import pytest
from django.utils import translation
from graphql_relay import to_global_id
from ...core.tests import extract_serializer_input_fields
from .. import models
from ..serializers import SaveFormSerializer
@pytest.mark.parametrize(
"form__description,form__name,question__type",
[("First result", "1st", models.Question.TYPE_FLOAT)],
)
def test_query_all_forms(
db,
snapshot,
form,
form_factory,
form_question,
form_question_factory,
question,
schema_executor,
):
form_factory(name="3rd", description="Second result")
form_factory(name="2nd", description="Second result")
form_question_factory(form=form)
query = """
query AllFormsQuery($name: String, $question: String, $orderBy: [FormOrdering]) {
allForms(name: $name, orderBy: $orderBy) {
edges {
node {
id
slug
name
description
meta
questions(search: $question) {
edges {
node {
id
slug
label
}
}
}
}
}
}
}
"""
result = schema_executor(
query,
variables={
"question": question.label,
"orderBy": ["NAME_ASC", "CREATED_AT_ASC"],
},
)
assert not result.errors
snapshot.assert_match(result.data)
@pytest.mark.parametrize("language_code", ("en", "de"))
@pytest.mark.parametrize("form__description", ("some description text", ""))
def test_save_form(db, snapshot, form, settings, schema_executor, language_code):
query = """
mutation SaveForm($input: SaveFormInput!) {
saveForm(input: $input) {
form {
id
slug
name
meta
}
clientMutationId
}
}
"""
inp = {"input": extract_serializer_input_fields(SaveFormSerializer, form)}
with translation.override(language_code):
result = schema_executor(query, variables=inp)
assert not result.errors
snapshot.assert_match(result.data)
def test_save_form_created_as_admin_user(db, form, admin_schema_executor, admin_user):
query = """
mutation SaveForm($input: SaveFormInput!) {
saveForm(input: $input) {
form {
createdByUser
}
}
}
"""
inp = {"input": extract_serializer_input_fields(SaveFormSerializer, form)}
form.delete() # test creation of form
result = admin_schema_executor(query, variables=inp)
assert not result.errors
assert result.data["saveForm"]["form"]["createdByUser"] == admin_user.username
@pytest.mark.parametrize("form__meta", [{"meta": "set"}])
def test_copy_form(db, form, form_question_factory, schema_executor):
form_question_factory.create_batch(5, form=form)
query = """
mutation CopyForm($input: CopyFormInput!) {
copyForm(input: $input) {
form {
slug
}
clientMutationId
}
}
"""
inp = {"input": {"source": form.pk, "slug": "new-form", "name": "Test Form"}}
result = schema_executor(query, variables=inp)
assert not result.errors
form_slug = result.data["copyForm"]["form"]["slug"]
assert form_slug == "new-form"
new_form = models.Form.objects.get(pk=form_slug)
assert new_form.name == "Test Form"
assert new_form.meta == form.meta
assert new_form.source == form
assert list(
models.FormQuestion.objects.filter(form=new_form).values("question")
) == list(models.FormQuestion.objects.filter(form=form).values("question"))
def test_add_form_question(db, form, question, form_question_factory, schema_executor):
form_questions = form_question_factory.create_batch(5, form=form)
# initialize sorting keys
for idx, form_question in enumerate(form_questions):
form_question.sort = idx + 1
form_question.save()
query = """
mutation AddFormQuestion($input: AddFormQuestionInput!) {
addFormQuestion(input: $input) {
form {
questions {
edges {
node {
slug
}
}
}
}
clientMutationId
}
}
"""
result = schema_executor(
query,
variables={
"input": {
"form": to_global_id(type(form).__name__, form.pk),
"question": to_global_id(type(question).__name__, question.pk),
}
},
)
assert not result.errors
questions = result.data["addFormQuestion"]["form"]["questions"]["edges"]
assert len(questions) == 6
assert questions[-1]["node"]["slug"] == question.slug
def test_remove_form_question(
db, form, form_question, question, snapshot, schema_executor
):
query = """
mutation RemoveFormQuestion($input: RemoveFormQuestionInput!) {
removeFormQuestion(input: $input) {
form {
questions {
edges {
node {
slug
}
}
}
}
clientMutationId
}
}
"""
result = schema_executor(
query,
variables={
"input": {
"form": to_global_id(type(form).__name__, form.pk),
"question": to_global_id(type(question).__name__, question.pk),
}
},
)
assert not result.errors
snapshot.assert_match(result.data)
def test_reorder_form_questions(db, form, form_question_factory, schema_executor):
form_question_factory.create_batch(2, form=form)
query = """
mutation ReorderFormQuestions($input: ReorderFormQuestionsInput!) {
reorderFormQuestions(input: $input) {
form {
questions {
edges {
node {
slug
}
}
}
}
clientMutationId
}
}
"""
question_ids = (
form.questions.order_by("slug").reverse().values_list("slug", flat=True)
)
result = schema_executor(
query,
variables={
"input": {
"form": to_global_id(type(form).__name__, form.pk),
"questions": [
to_global_id(type(models.Question).__name__, question_id)
for question_id in question_ids
],
}
},
)
assert not result.errors
result_questions = [
question["node"]["slug"]
for question in result.data["reorderFormQuestions"]["form"]["questions"][
"edges"
]
]
assert result_questions == list(question_ids)
def test_reorder_form_questions_invalid_question(
db, form, question_factory, schema_executor
):
invalid_question = question_factory()
query = """
mutation ReorderFormQuestions($input: ReorderFormQuestionsInput!) {
reorderFormQuestions(input: $input) {
form {
questions {
edges {
node {
slug
}
}
}
}
clientMutationId
}
}
"""
result = schema_executor(
query,
variables={
"input": {
"form": to_global_id(type(form).__name__, form.pk),
"questions": [
to_global_id(type(models.Question).__name__, invalid_question.slug)
],
}
},
)
assert result.errors
def test_reorder_form_questions_duplicated_question(
db, form, question, form_question, schema_executor
):
query = """
mutation ReorderFormQuestions($input: ReorderFormQuestionsInput!) {
reorderFormQuestions(input: $input) {
form {
questions {
edges {
node {
slug
}
}
}
}
clientMutationId
}
}
"""
result = schema_executor(
query,
variables={
"input": {
"form": to_global_id(type(form).__name__, form.pk),
"questions": [question.slug, question.slug],
}
},
)
assert result.errors
```
#### File: form/tests/test_jexl.py
```python
import pytest
from ..jexl import QuestionJexl
@pytest.mark.parametrize(
"expression,num_errors",
[
# correct case
('"question-slug"|answer|mapby', 0),
# invalid subject type
("100|answer", 1),
# two invalid subject types
('["test"]|answer || 1.0|answer', 2),
# invalid operator
("'question-slug1'|answer &&& 'question-slug2'|answer", 1),
],
)
def test_question_jexl_validate(expression, num_errors):
jexl = QuestionJexl()
assert len(list(jexl.validate(expression))) == num_errors
@pytest.mark.parametrize(
"expression,result",
[
("[1,2] intersects [2,3]", True),
("[1,2] intersects [3,4]", False),
("[] intersects []", False),
("[1] intersects []", False),
("['foo'] intersects ['bar', 'bazz']", False),
("['foo'] intersects ['foo', 'foo']", True),
("[1] intersects [1] && [2] intersects [2]", True),
("[2] intersects [1] + [2]", True),
],
)
def test_intersects_operator(expression, result):
assert QuestionJexl().evaluate(expression) == result
def test_jexl_form():
answer_by_question = {
"a1": {"value": "A1", "form": "f-main-slug"},
"b1": {"value": "B1", "form": "f-main-slug"},
}
assert (
QuestionJexl(answer_by_question, "f-main-slug").evaluate("form")
== "f-main-slug"
)
``` |
{
"source": "johnl28/py-password-generator",
"score": 4
} |
#### File: johnl28/py-password-generator/main.py
```python
import random
def GeneratePassword(length, symbols, numbers, capitals):
password = ""
while len(password) < (length):
if symbols:
password += chr(random.randint(33, 47))
symbols -= 1
elif numbers:
password += str(random.randint(0, 9))  # randint is inclusive at both ends, so 0-9 yields a single digit
numbers -= 1
elif capitals:
password += chr(random.randint(65, 90))
capitals -= 1
else:
if (capitals or numbers or symbols):
continue
password += chr(random.randint(65, 90)).lower()
newPassword = ""
while len(password):
index = random.randint(0, len(password)-1)
newPassword += password[index]
password = password[:index] + password[index+1:] if len(password) != 1 else ""
return newPassword
for x in range(10):
print(GeneratePassword(20, 5, 1, 5))
``` |
{
"source": "JohnL4/Diaspora",
"score": 3
} |
#### File: Diaspora/ClusterGenerator/cluster-gen.py
```python
# Generates a graph of systems for a Diaspora RPG cluster.
#
# Command-line args are system names, separated by spaces.
#
# Example:
# cluster-gen.py A B C D E F > our-cluster.dot
# neato -Tpdf -oour-cluster.pdf our-cluster.dot
import argparse
import random
import sys
TECH_COLOR = {"red":0, "green":0, "blue":1}
ENVIRONMENT_COLOR = {"red":0, "green":1, "blue":0}
RESOURCE_COLOR = {"red":1, "green":0, "blue":0}
def fudgeThrow():
"""Throw 4d3-8, essentially. Four fudge dice."""
throw = 0
for i in range(4):
throw += random.randrange(-1,2)
return throw
def nodeColor_Additive(techThrow, envThrow, resThrow):
"""Returns the background color a node should have, using a fairly simple additive algorithm."""
techness = (techThrow + 4)/8
envness = (envThrow + 4)/8
resness = (resThrow + 4)/8
r = g = b = 0
for aspect in [[techness, TECH_COLOR], [envness, ENVIRONMENT_COLOR], [resness, RESOURCE_COLOR]]:
r = r + aspect[0] * aspect[1]["red"]
g = g + aspect[0] * aspect[1]["green"]
b = b + aspect[0] * aspect[1]["blue"]
# Scale back to interval [0,1]
m = 1 # max(r,g,b) # Max. it could possibly be, given the static color setup above.
mr = mg = mb = 0 # Max. red, green, blue, if a system is T4 E4 R4.
for c in [TECH_COLOR, ENVIRONMENT_COLOR, RESOURCE_COLOR]:
mr = mr + c["red"]
mg = mg + c["green"]
mb = mb + c["blue"]
m = max( mr, mg, mb)
r = r / m
g = g / m
b = b / m
# print("\tDEBUG: T{0} E{1} R{2} ==> color({3}, {4}, {5})".format( techThrow, envThrow, resThrow, r, g, b),file=sys.stderr)
# Make hex RGB color
base = 127 - 32
r = int( base + (255 - base) * r)
g = int( base + (255 - base) * g)
b = int( base + (255 - base) * b)
retval = "#{0:02x}{1:02x}{2:02x}".format( r, g, b)
return retval
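# Worked example (hand-checked against the constants above): a neutral T0 E0 R0
# system scales each channel to 0.5, so nodeColor_Additive(0, 0, 0) == "#afafaf".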
# ------------------------------------------------- class StarSystem -------------------------------------------------
class StarSystem:
"""A star system."""
def __init__( self, aName):
self._name = aName
self._techLevel = fudgeThrow()
self._envLevel = fudgeThrow()
self._resLevel = fudgeThrow()
def getName( self):
return self._name
def getScore( self):
"""System's "score": sum of tech, env and res levels."""
return self._techLevel + self._envLevel + self._resLevel
def getTechLevel( self):
return self._techLevel
def setTechLevel( self, aTechLevel):
self._techLevel = aTechLevel
def getEnvLevel( self):
return self._envLevel
def getResLevel( self):
return self._resLevel
def toString( self):
return "{0}: T{1} E{2} R{3}, score= {4}".format(
self._name, self._techLevel, self._envLevel, self._resLevel, self.getScore())
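# Illustrative sketch (levels are random fudge throws, so output varies):
# StarSystem("Arcturus").toString() might yield "Arcturus: T1 E-2 R0, score= -1".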
# ------------------------------------------------------- main -------------------------------------------------------
nodeColor_func = nodeColor_Additive
random.seed()
argParser = argparse.ArgumentParser(description="Diaspora cluster generator")
argParser.add_argument('--legend', action="store_true", help="Include a legend of colors in the generated graph")
argParser.add_argument('systemNames', nargs=argparse.REMAINDER, help="Unique first letters of star system names")
args = argParser.parse_args()
# systemNames = sys.argv
# systemNames.pop(0) # zap program name
systemNames = args.systemNames
n = len(systemNames) # Number of systems
connected = list(range(n)) # Whether system i is connected to the cluster yet.
for i in range(n):
connected[i] = 0
starSystems = []
maxTech = -5 # Something less than -4
maxScore = -15 # Something less than 3*(-4)
minScore = 15 # Something greater than 3*4
for i in range(n):
starSys = StarSystem( systemNames[i])
starSystems.append( starSys)
if (maxTech < starSys.getTechLevel()):
maxTech = starSys.getTechLevel()
s = starSys.getScore()
if (minScore > s):
minScore = s
if (maxScore < s):
maxScore = s
print("\tDEBUG: systems before checking for slipstream guarantee:", file=sys.stderr)
for starSys in starSystems:
print( "\tDEBUG: {0}".format( starSys.toString()), file=sys.stderr)
goodSystems = []
crappySystems = []
if (maxTech < 2):
# Must fulfill "slipstream guarantee": at least one system of T2 or better.
print('\tDEBUG: **** Max tech ({0}) < 2'.format( maxTech), file=sys.stderr)
for starSys in starSystems:
s = starSys.getScore()
if (s == maxScore):
goodSystems.append( starSys)
if (s == minScore):
crappySystems.append( starSys)
print('\tDEBUG: good systems: {0}'.format( ", ".join(map( lambda s : s.getName(), goodSystems))), file=sys.stderr)
if (len(goodSystems) == 1):
selectedGoodSystem = goodSystems[0]
else:
selectedGoodSystem = goodSystems[random.randrange(len(goodSystems))]
print('\tDEBUG: selected good system: {0}'.format( selectedGoodSystem.getName()), file=sys.stderr)
selectedGoodSystem.setTechLevel( 2)
print('\tDEBUG: crappy systems: {0}'.format( ", ".join(map( lambda s : s.getName(), crappySystems))), file=sys.stderr)
if (len(crappySystems) == 1):
crappySystems[0].setTechLevel( 2)
else:
# On the off chance that all the systems have the same score, take the "good" system out of the crappy list.
goodSystemName = selectedGoodSystem.getName()
crappySystems = list(filter( lambda s : s.getName() != goodSystemName, crappySystems))
print('\tDEBUG: crappy systems after filtering out good system (if present): {0}'.format( ", ".join(map( lambda s : s.getName(), crappySystems))), file=sys.stderr)
crappySystems[random.randrange(len(crappySystems))].setTechLevel( 2)
print( '''// Process with GraphViz neato.
graph {
graph [ // Comma-separated key=value pairs.
start="random -1", // Randomize layout. Rerun to get a different layout.
splines=true,
overlap=false,
esep="+15"
]
node [shape=circle, style=filled]
''')
if (args.legend):
print( '// Legend')
print( '{0} [label="\\N\\nT{1} E{2} R{3}", fillcolor="{4}"]'.format("All", 4, 4, 4, nodeColor_func(4,4,4)))
print( '{0} [label="\\N\\nT{1} E{2} R{3}", fillcolor="{4}"]'.format("Tech", 4, -4, -4, nodeColor_func(4,-4,-4)))
print( '{0} [label="\\N\\nT{1} E{2} R{3}", fillcolor="{4}"]'.format("Environment", -4, 4, -4, nodeColor_func(-4,4,-4)))
print( '{0} [label="\\N\\nT{1} E{2} R{3}", fillcolor="{4}"]'.format("Resources", -4, -4, 4, nodeColor_func(-4,-4,4)))
print( '{0} [label="\\N\\nT{1} E{2} R{3}", fillcolor="{4}"]'.format("None", -4, -4, -4, nodeColor_func(-4,-4,-4)))
legendNodes = ["All","Tech","Environment","Resources","None"]
for i in range(len(legendNodes)-1):
for j in range(i+1, len(legendNodes)):
print( '{0} -- {1}'.format(legendNodes[i], legendNodes[j]))
print( '// Legend ends')
print()
for starSys in starSystems:
t = starSys.getTechLevel()
e = starSys.getEnvLevel()
r = starSys.getResLevel()
color = nodeColor_func( t, e, r)
print( '{0} [label="\\N\\nT{1} E{2} R{3}", fillcolor="{4}"]'.format(starSys.getName(), t, e, r, color))
print()
# Need to roll for every system but the last two. 2nd-to-last is guaranteed
# to be connected to last, at least.
for i in range(n-2):
connectedThrow = fudgeThrow()
# print("\tDEBUG: {0} ({1}): {2}".format(i, starSystems[i].getName(), connectedThrow), file=sys.stderr)
print('\t\t// "Connect" throw from {0}: {1}'.format( starSystems[i].getName(), connectedThrow))
print( "{0} -- {1}".format(starSystems[i].getName(), starSystems[i+1].getName()))
j = i + 2
if (connectedThrow >= 0):
while ((j < n) and connected[j]):
j = j+1
if (j < n):
print("{0} -- {1}".format(starSystems[i].getName(), starSystems[j].getName()))
connected[j] = 1
if (connectedThrow > 0):
j = j+1
while ((j < n) and connected[j]):
j = j+1
if (j < n):
print("{0} -- {1}".format(starSystems[i].getName(), starSystems[j].getName()))
connected[j] = 1
# print("\tDEBUG: {0} ({1}): Last".format(n-2, starSystems[n-2].getName()), file=sys.stderr)
print('\t\t// "Connect" throw from {0}: {1}'.format( starSystems[n-2].getName(), connectedThrow))
print("{0} -- {1}".format(starSystems[n-2].getName(), starSystems[n-1].getName()))
print('\t\t// "Connect" throw from {0}: {1}'.format( starSystems[n-1].getName(), connectedThrow))
print( "}") # graph
``` |
{
"source": "JohnL4/Markovian-text",
"score": 4
} |
#### File: JohnL4/Markovian-text/markovian-text.py
```python
import argparse, collections, random
parser = argparse.ArgumentParser(description='Generate order-k Markovian text given input sample text file')
parser.add_argument('--sampleFile', type=open, required=True,
help='File containing sample text. Line terminators will be preserved')
parser.add_argument('--preserveLineBreaks', action='store_true',
help='Preserve line breaks in input; otherwise just treat them as a space character')
parser.add_argument('-k', type=int, default=3)
# parser.add_argument('--startOn', type=str, choices=['word', 'capitol', 'line', 'any'], default='word',
# help='Start generating on the given boundary (or none); governs random choice of first character ("capitol" is capitol letter beginning word)')
parser.add_argument('--terminateOn', '--endOn', choices=['EOL', 'EOP', 'length'], default='length',
help='The condition on which to terminate the generated text -- End of Line, End of Paragraph, or length of output (in characters)')
parser.add_argument('--terminateLength', '--length', type=int, default=50,
help='If --terminateOn is "length", the number of characters to output before terminating')
args = parser.parse_args()
#---------------------------------------------------- MarkovDict -----------------------------------------------------
class MarkovDict:
"""
A "dictionary" of frequency tables, mapping from a prefix string of characters (possibly 0-length) to a frequency table
of characters that follow that prefix.
"""
def __init__( self, k: int):
self._order = k
self._prefixToFreqTable = {}
def order( self) -> int:
"""Return the 'order' of this MarkovDict, where 'order' is a non-negative integer."""
return self._order
def add( self, aChar: str, aPrefix: str) -> None:
"""
Add the given character to the frequency tables of this MarkovDict by bumping the count by 1 for the given prefix
and character.
"""
if (aPrefix not in self._prefixToFreqTable):
self._prefixToFreqTable[aPrefix] = {}
if (aChar not in self._prefixToFreqTable[ aPrefix]):
self._prefixToFreqTable[ aPrefix][ aChar] = 0
self._prefixToFreqTable[ aPrefix][ aChar] += 1
def nextRandomChar( self, aTextSoFar: str) -> str:
"""Return a random next character, given the aTextSoFar."""
n = min( self.order(), len(aTextSoFar))
# prefix will be last k characters of text so far, where k is order.
prefix = aTextSoFar[ len( aTextSoFar) - n :len( aTextSoFar)]
freqTable = self._prefixToFreqTable[ prefix]
# TODO: precompute or cache the following sum
totFreq = sum( freqTable.values())
r = random.randrange( totFreq)
for (k,v) in freqTable.items():
nextChar = k
r -= v
if (r < 0):
break
return nextChar
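# Minimal sketch (hypothetical, comments only): with order k = 2,
#   md = MarkovDict(2)
#   md.add("c", "ab")           # record that "c" followed the prefix "ab" once
#   md.nextRandomChar("xab")    # looks up the last 2 chars ("ab") and returns "c"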
#------------------------------------------------------- main --------------------------------------------------------
def main():
if args.terminateOn in [ 'EOL', 'EOP'] and not args.preserveLineBreaks:
raise Exception( '--terminateOn EOL or EOP requires --preserveLineBreaks')
markovDict = MarkovDict( args.k)
readSample( markovDict)
s = random.seed()
generatedText = ''
while (not stop( generatedText)):
generatedText += markovDict.nextRandomChar( generatedText)
print( generatedText.rstrip())
def stop( aTextSoFar: str) -> bool:
if (args.terminateOn == 'EOP'):
retval = aTextSoFar.endswith('\n\n')
elif (args.terminateOn == 'EOL'):
retval = aTextSoFar.endswith( '\n')
elif (args.terminateOn == 'length'):
retval = len( aTextSoFar) >= args.terminateLength
return retval
def readSample( aMarkovDict: MarkovDict) -> None:
"""Read the given sample file into the given MarkovDict."""
print( f'Reading {args.sampleFile.name} into MarkovDict of order {aMarkovDict.order()}')
lineEnd = '\n' if args.preserveLineBreaks else ' '
filePrefix = ''
prevChars = collections.deque()
for line in args.sampleFile:
line = line.rstrip()
if len( filePrefix) < aMarkovDict.order():
filePrefix += line[0:aMarkovDict.order()]
if len( filePrefix) < aMarkovDict.order():
# Still too short; more characters might be coming from the next line, so slap on a lineEnd.
filePrefix += lineEnd
for char in line:
analyzePrevChars( aMarkovDict, prevChars, char)
pushChar( prevChars, char, aMarkovDict.order())
analyzePrevChars( aMarkovDict, prevChars, lineEnd)
pushChar( prevChars, lineEnd, aMarkovDict.order())
# Loop around to beginning of file a bit so if we randomly get a sequence of characters at the end of the file, we
# don't bomb out because that sequence isn't in the MarkovDict.
for char in filePrefix:
analyzePrevChars( aMarkovDict, prevChars, char)
pushChar( prevChars, char, aMarkovDict.order())
def analyzePrevChars( aMarkovDict: MarkovDict, aPrevChars: collections.deque, aChar: str) -> None:
"""Analyze the given string of characters into the given MarkovDict."""
prefixes = prefixesFromDeque(aPrevChars)
for pfx in prefixes:
aMarkovDict.add(aChar, pfx)
while (len( aPrevChars) > aMarkovDict.order()):
aPrevChars.popleft()
def prefixesFromDeque( aDequeOfChars: collections.deque) -> [str]:
"""
A list of prefixes from the given deque, including the last char. Prefixes are built from the right, so if the
current deque is ['a','b','c'], the returned prefixes will be ['', 'c', 'bc', 'abc'].
"""
retval = []
for i in range( len( aDequeOfChars) + 1):
pfx = ""
for j in range(i):
pfx = aDequeOfChars[ -j - 1 ] + pfx
retval.append( pfx)
return retval
def pushChar( aDequeOfChars: collections.deque, aChar: str, aMaxLength: int) -> None:
"""Push the given char onto the deque, keeping the length of the deque no more than aMaxLength."""
aDequeOfChars.append( aChar)
while (len( aDequeOfChars) > aMaxLength):
aDequeOfChars.popleft()
#---------------------------------------------------- call main() ----------------------------------------------------
if __name__ == '__main__':
main()
``` |
{
"source": "JohnL4/PythonPrimes",
"score": 4
} |
#### File: JohnL4/PythonPrimes/PrimeFinder.py
```python
__author__ = 'john'
class PrimeFinder:
"""Abstract base class for classes that find prime numbers."""
def primesNotGreaterThan(self, aMaximum):
"""Returns a list of primes all of which are less than aMaximum."""
# This implementation is stupid on purpose.
somePrimes = [2,3,5,7]
retval = []
i = 0
while (i < len(somePrimes)):
if (somePrimes[i] <= aMaximum):
retval.append( somePrimes[i])
else:
break
i += 1
return retval
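if __name__ == '__main__':
    # Quick illustrative check of the stub implementation above.
    print(PrimeFinder().primesNotGreaterThan(5))   # expected: [2, 3, 5]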
``` |
{
"source": "JohnLamontagne/Lunar-Mirage",
"score": 2
} |
#### File: Dialogues/.scripts/dialogueTest.py
```python
def testDialogueHandler(args):
print("Test response handler. Interacting player: " + args.Player.Name)
```
#### File: Npcs/.scripts/SallyAggressive.py
```python
import random
import npc_common
from Lunar.Core import *
from Lunar.Core.Utilities.Data import *
from Lunar.Core.Utilities.Logic import *
from Lunar.Server.Utilities import *
from Lunar.Server.World.BehaviorDefinition import *
from Lunar.Server.World.Actors import *
class CombatNPCState(IActorState[NPC]):
def __init__(self):
return
def OnEnter(self, npc):
print("In combat with " + str(npc.Target))
nextAttackTimer = GameTimer(2000)
npc.GameTimers.Register('nextAttackTimer', nextAttackTimer)
def OnExit(self, npc):
return
def Update(self, gameTime, npc):
if not npc.HasTarget() or not npc.Target.Attackable:
return IdleState()
if npc.GameTimers.Get('nextAttackTimer').Finished:
if npc.WithinAttackingRangeOf(npc.Target):
npc.Behavior.Attack(npc, npc.Target)
npc.GameTimers.Get('nextAttackTimer').Reset()
else:
if (npc.GoTo(npc.Target)):
return MovingState(self)
else:
print("Lost target")
return IdleState()
return self
class IdleState(IActorState[NPC]):
def __init__(self):
return
def OnEnter(self, npc):
return
def OnExit(self, npc):
return
def Update(self, gameTime, npc):
target = npc.FindPlayerTarget()
if target:
npc.Target = target
return CombatNPCState()
elif npc.GameTimers.Get('randomWalkTmr').Finished:
return WanderState()
else:
return self
class MovingState(IActorState[NPC]):
def __init__(self, return_state):
self.return_state = return_state
def OnEnter(self, npc):
return
def OnExit(self, npc):
return
def Update(self, gameTime, npc):
if not npc.Moving:
return self.return_state
else:
# continue to check for combat opportunities while moving
target = npc.FindPlayerTarget()
if target:
npc.Target = target
return CombatNPCState()
else:
return self
class WanderState(IActorState[NPC]):
def __init__(self):
return
def OnEnter(self, npc):
return
def OnExit(self, npc):
return
def Update(self, gameTime, npc):
if npc.GameTimers.Get('randomWalkTmr').Finished:
npc.GameTimers.Get('randomWalkTmr').Reset()
direction = (-1 if random.random() < .5 else 1)
randomX = random.random() * (npc.Descriptor.MaxRoam.X * EngineConstants.TILE_SIZE) * direction
randomY = random.random() * (npc.Descriptor.MaxRoam.Y * EngineConstants.TILE_SIZE) * direction
dest = npc.Descriptor.Position + Vector(randomX, randomY)
print("Moving to " + str(dest))
npc.GoTo(dest)
return MovingState(self) # will return to this state when it is finished
else:
return IdleState()
class AggressiveNPCBehaviorDefinition(ActorBehaviorDefinition):
def __init__(self):
return  # __init__ must return None; returning a value raises TypeError on instantiation
def Update(self, npc, gameTime):
return
def OnCreated(self, npc):
randomWalkTmr = GameTimer(500)
npc.GameTimers.Register('randomWalkTmr', randomWalkTmr)
npc.StateMachine.Start(IdleState())
def Attack(self, npc, target):
damage = 5
target.OnAttacked(npc, damage)
def Attacked(self, npc, attacker, damage_delt):
print('Not implemented')
# Create an object of our AggressiveNPCBehaviorDefinition
# and assign it to BehaviorDefinition. This is used by the
# server to hook in our behavior.
BehaviorDefinition = AggressiveNPCBehaviorDefinition()
```
#### File: World/Scripts/npc_common.py
```python
import clr
clr.AddReference('Lunar.Core')
clr.AddReference('Lunar.Server')
import math
import random
from Lunar.Server import *
from Lunar.Core.World.Actor import *
def acquire_target(npc, gameTime):
target = npc.FindPlayerTarget()
if target:
npc.Target = target
npc.Aggrevated = True
elif not npc.State == ActorStates.Moving: # No target to find, let's just try finding a random place to wander.
randomWalkTmr = npc.GameTimerManager.GetTimer("randomWalkTmr" + str(npc.GetHashCode()))
if randomWalkTmr and randomWalkTmr.Finished:
random_walk(npc)
randomWalkTmr.Reset()
def random_walk(npc):
randomX = random.random() * (npc.MaxRoam.X * Constants.TILE_SIZE)
randomY = random.random() * (npc.MaxRoam.Y * Constants.TILE_SIZE)
signx = -1 if random.random() < .5 else 1
signy = -1 if random.random() < .5 else 1
npc.GoTo(Vector(randomX * signx, randomY * signy))
``` |
{
"source": "johnlane/sdinfo",
"score": 3
} |
#### File: johnlane/sdinfo/sdinfo.py
```python
import sys
def unstuff(x, start, size):
return (x >> start) & (2**size - 1)
def unstuffs(x,start,size):
s = ""
while (size > 0):
size-=8
s+=chr(unstuff(x,start+size,8))
return s
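# Worked examples (hand-checked): unstuff(0xABCD, 8, 4) keeps bits 8-11, i.e.
# (0xABCD >> 8) & 0xF == 0xB, and unstuffs(0x414243, 0, 24) decodes the bytes
# 0x41 0x42 0x43 into the ASCII string "ABC".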
def yesno (n):
return "yes" if n else "no"
def main(name, args):
if len(args) != 1:
print "Syntax: %s <card>" % (name, )
print "Example: %s mmcblk0" % (name, )
return 100
card = args[0]
dev = "/sys/class/block/%s/device" % (card, )
# CID : Card Identification
print "------------------------------------------------------------"
cid = int(file(dev+"/cid").read(), 16)
print "CID : Card Identification : %x" % cid
print
# Bits 120-127 contain the MID. This identifies the card manufacturer
# The codes are allocated by SD-3C, LLC (http://www.sd-3c.com)
# and would not appear to be publicly available.
mid = unstuff(cid,120,8)
print "MID : Manufacturer ID : %d" % mid
print
# Bits 104-119 contain the OID. This identifies the card OEM.
# The codes are allocated by SD-3C, LLC (http://www.sd-3c.com)
# and would not appear to be publicly available.
oid = unstuffs(cid,104,16)
print "OID : OEM/Application ID : %s : 0x%x" % (oid,unstuff(cid,104,16))
print
# Bits 64-103 contain the product name, a 5 character ASCII string
pnm = unstuffs(cid,64,40)
print "PNM : Product Name : %s" % pnm
print
# Bits 56-63 contain the product revision, 4 bits major and 4 bits minor.
prv_major = unstuff(cid,60,4)
prv_minor = unstuff(cid,56,4)
print "PRV : Product Revision : %d.%d" % (prv_major,prv_minor)
print
# Bits 24-55 contain the product serial number, a 32 bit binary number.
psn = unstuff(cid,24,32)
print "PSN : Product Serial Number : %x" % psn
print
# Bits 20-23 are reserved
# Bits 8-19 contain the manufacturing date, 4 bits for month and
# 8 bits for year, with 0 meaning year 2000.
mdt_y = unstuff(cid,12,8)+2000
mdt_m = unstuff(cid,8,4)
print "MDT : Maunfacturing Date : %d.%d" % (mdt_y,mdt_m)
print
# Bits 1-7 contain the CRC checksum
cid_crc = unstuff(cid,1,7)
print "CRC : CRC : %d" % cid_crc
# Bit 0 is unused
# CSD : Card-Specific Data
print "------------------------------------------------------------"
csd = int(file(dev+"/csd").read(), 16)
print "CSD : Card-Specific Data : %x" % csd
print
# Bit 126-127 contain the CSD Structure version.
# This affects how some csd fields are interpreted.
csd_structure = unstuff(csd,126,2)
print "CSD_STRUCTURE: %d" % (csd_structure)
csd_version = csd_structure + 1
if csd_version > 2:
print "Out of range CSD_STRUCTURE: %d" % csd_structure
return 100
print "CSD Version : %d" % csd_version
print
# Bits 120-125 are reserved
# Bits 112-119 contain the data read access time.
# Bits 0-2 contain the time unit.
# Bits 3-6 contain the time value.
# Bit 7 is reserved,
taac = unstuff(csd,112,6)
taac_time_unit = 10**unstuff(taac,0,3)
taac_time_value = {
0: 0,
1: 1.0,
2: 1.2,
3: 1.3,
4: 1.5,
5: 2.0,
6: 2.5,
7: 3.0,
8: 3.5,
9: 4.0,
10: 4.5,
11: 5.0,
12: 5.5,
13: 6.0,
14: 7.0,
15: 8.0
}[unstuff(taac,3,4)]
print "TAAC: data read access time : %d : 0x%x" % (taac,taac)
print " unit : %d" % taac_time_unit
print " value : %d => %f" % (unstuff(taac,3,4),taac_time_value)
print " : %f (nanoseconds)" % (taac_time_unit * taac_time_value)
print
# Bits 104-111 contain the data read access time in clock cycles
# Unit multiplier is 100 clock cycles.
nsac = unstuff(csd,104,8)
print "NSAC: data read access time (in clock cycles) : %d" % (nsac*100)
print
# Bits 96-103 contain the maximum data transfer rate.
# Bits 0-2 contain the time unit.
# Bits 3-6 contain the time value.
# Bit 7 is reserved,
tran_speed = unstuff(csd,96,8)
tran_speed_unit = (10**unstuff(tran_speed,0,3)) / 10.0  # float division so unit 0 maps to 0.1 Mbit/s
tran_speed_value = {
0: 0,
1: 1.0,
2: 1.2,
3: 1.3,
4: 1.5,
5: 2.0,
6: 2.5,
7: 3.0,
8: 3.5,
9: 4.0,
10: 4.5,
11: 5.0,
12: 5.5,
13: 6.0,
14: 7.0,
15: 8.0
}[unstuff(tran_speed,3,4)]
print "TRAN_SPEED : max data transfer rate : %d" % tran_speed
print " unit : %d" % tran_speed_unit
print " value : %d => %f" % (unstuff(tran_speed,3,4),tran_speed_value)
print " : %f (Mbit/s) " % (tran_speed_unit * tran_speed_value)
print
# Bits 84-95 contain the card command classes.
ccc = unstuff(csd,84,12)
print "CCC : card command classes : %d" % ccc
c=0
while ccc > 2**c:
if (ccc&(2**c)) != 0: print " : class %d" % c
c+=1
print
# Bits 80-83 contain the maximum read data block length.
# actual value is 2 ^ stored value
read_bl_len = unstuff(csd,80,4)
len_bl_read = 2**read_bl_len
print "READ_BL_LEN : max read data block length : %d" % read_bl_len
print "LEN_BL_READ : max read block data length : %d bytes ( 2^%d)" % (len_bl_read,read_bl_len)
print
# Bit 79 is set if partial blocks for reads are allowed
# this is always allowed in an SD Memory Card. It means that smaller blocks
# can be used as well. The minimum block size will be one byte.
read_bl_partial = unstuff(csd,79,1)
print "READ_BL_PARTIAL : partial blocks for read allowed : %s (%d)" % (yesno(read_bl_partial),read_bl_partial)
print
# Bit 78 is set if write block misalignment is allowed. This defines if the data
# block to be written by one command can be spread over more than one
# physical block. The size of the memory block is defined by WRITE_BL_LEN.
write_blk_misalign = unstuff(csd,78,1)
print "WRITE_BLK_MISALIGN : write block misalignment : %s (%d)" % (yesno(write_blk_misalign),write_blk_misalign)
print
# Bit 77 is set if read block misalignment is allowed. This defines if the data
# block to be read by one command can be spread over more than one
# physical block. The size of the memory block is defined by READ_BL_LEN.
read_blk_misalign = unstuff(csd,77,1)
print "READ_BLK_MISALIGN : read block misalignment : %s (%d)" % (yesno(read_blk_misalign),read_blk_misalign)
print
# Bit 76 is set if DSR (Driver State Register) is implemented. This is true if
# the configurable driver stage is integrated on the card.
dsr_imp = unstuff(csd,76,1)
print "DSR_IMP : DSR implemented : %s (%d)" % (yesno(dsr_imp),dsr_imp)
print
# Bits 74-75 are reserved
# Bits 47-73 are implemented differently for CSD version 1 and 2
if csd_version == 1:
# Bits 62-73 contain the C_SIZE used to compute the user's data card capacity.
c_size = unstuff(csd,62,12)
print "C_SIZE : device size : %d : 0x%x" % (c_size,c_size)
print
# Lookup for max current at min Vdd
curr_min = {
0: 0.5,
1: 1,
2: 5,
3: 10,
4: 25,
5: 35,
6: 60,
7:100
}
# Lookup for max current at max Vdd
curr_max = {
0: 1,
1: 5,
2: 10,
3: 25,
4: 35,
5: 45,
6: 80,
7:200
}
# Bits 59-61 contain the maximum read current at the minimum power supply Vdd
vdd_r_curr_min = unstuff(csd,59,3)
print "VDD_R_CURR_MIN : max read current @ VDD min : %d : %d mA" % (vdd_r_curr_min,curr_min[vdd_r_curr_min])
print
# Bits 56-58 contain the maximum read current at the maximum power supply Vdd
vdd_r_curr_max = unstuff(csd,56,3)
print "VDD_R_CURR_MAX : max read current @ VDD max : %d : %d mA" % (vdd_r_curr_max,curr_max[vdd_r_curr_max])
print
# Bits 53-55 contain the maximum write current at the minimum power supply Vdd
vdd_w_curr_min = unstuff(csd,53,3)
print "VDD_W_CURR_MIN : max write current @ VDD min : %d : %d mA" % (vdd_w_curr_min,curr_min[vdd_w_curr_min])
print
# Bits 50-52 contain the maximum write current at the maximum power supply Vdd
vdd_w_curr_max = unstuff(csd,50,3)
print "VDD_W_CURR_MAX : max write current @ VDD max : %d : %d mA" % (vdd_w_curr_max,curr_max[vdd_w_curr_max])
print
# Bits 47-49 contains a coding factor for computing the total device size
c_size_mult = unstuff(csd,47,3)
print "C_SIZE_MULT : device size multiplier : %d" % c_size_mult
print
# Card capacity is calculated from C_SIZE and C_SIZE_MULT
mult = 2**(c_size_mult+2)
blocknr = (c_size+1) * mult
block_len = 2**read_bl_len
memory_capacity = blocknr * block_len
print "User's data card capacity : %d : 0x%x (B)" % (memory_capacity,memory_capacity)
print " : %d : 0x%x (KiB)" % (memory_capacity/1024,memory_capacity/1024)
print " : %d : 0x%x (MiB)" % (memory_capacity/1024/1024,memory_capacity/1024/1024)
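        # Worked example (illustrative values, not read from any particular
        # card): with C_SIZE = 4031, C_SIZE_MULT = 7 and READ_BL_LEN = 9,
        #   mult = 2**(7+2) = 512, blocknr = 4032 * 512 = 2064384,
        #   block_len = 2**9 = 512, so the capacity is
        #   2064384 * 512 = 1056964608 B = 1032192 KiB = 1008 MiB.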
print
if csd_version == 2:
# Bits 70-73 are reserved
# Bits 48-69 contain the C_SIZE used to compute the user's data card capacity.
c_size = unstuff(csd,48,22)
print "C_SIZE : device size : %d : 0x%x" % (c_size,c_size)
print " user data area capacity : %d KiB" % ((c_size+1) * 512)
print " : %d MiB" % ((c_size+1) * 512 / 1024)
print " : %d GiB" % ((c_size+1) * 512 / 1024**2)
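        # Worked example (illustrative value): C_SIZE = 15159 gives
        # 15160 * 512 KiB = 7761920 KiB = 7580 MiB, i.e. roughly 7.4 GiB,
        # which is typical for a card sold as "8 GB".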
print
# Bit 47 is reserved
# Bit 46 defines the erase block length. This is the granularity of the unit size
# of the data to be erased:
# 0 = granularity is SECTOR SIZE (i.e. can not erase single blocks)
# 1 = granularity is 512 bytes (i.e. can erase single blocks)
erase_block_en = unstuff(csd,46,1)
print "ERASE_BLOCK_EN : erase single block enable : %s (%d)" % (yesno(erase_block_en),erase_block_en)
print
# Bits 39-45 contain the size of an erasable sector as a number of write blocks
# The actual value is value +1.
sector_size = unstuff(csd,39,7)+1
write_bl_len = unstuff(csd,22,4) # captured out of sequence as needed for this calculation
len_bl_write = 2**write_bl_len # computed out of sequence as needed for this calculation
print "SECTOR_SIZE : erase sector size : %d : 0x%x (write blocks)" % (sector_size,sector_size)
print " : %d B" % (sector_size*len_bl_write)
print " : %d KiB" % (sector_size*len_bl_write/1024)
print
# Bits 32-38 contain the write protect group size.
# The actual value is value +1.
wp_grp_size = unstuff(csd,32,7)+1
print "WP_GRP_SIZE : write protect group size : %d" % wp_grp_size
print " : %d (KiB)" % (wp_grp_size*sector_size)
print
# Bit 31 defines if group write protection is available (0=no, 1=yes).
wp_grp_enable = unstuff(csd,31,1)
print "WP_GRP_ENABLE : write protect group enable : %s (%d)" % (yesno(wp_grp_enable),wp_grp_enable)
print
# Bits 29-30 are reserved
    # Bits 26-28 define the typical write time as a multiple of the read time
r2w_factor = 2**unstuff(csd,26,3)
print "R2W_FACTOR : write speed factor : %d" % r2w_factor
print " : writing is %d times slower than reading" % r2w_factor
print
# Bits 22-25 contain the write block length, captured above
print "WRITE_BL_LEN : max write block data length : %d" % (write_bl_len)
print "LEN_BL_WRITE : max write block data length : %d bytes ( 2^%d)" % (len_bl_write,write_bl_len)
print
    # Bit 21 defines whether partial block sizes can be used in block write commands
write_bl_partial = unstuff(csd,21,1)
print "WRITE_BL_PARTIAL : partial blocks for write allowed : %s (%d)" % (yesno(write_bl_partial),write_bl_partial)
print
# Bits 16-20 are reserved
# Bit 15 indicates the selected group of file formats. This field is read only
# for ROM. Value is 0 or 1. 1 is reserved. 0: see file_format below.
file_format_grp = unstuff(csd,15,1)
print "FILE_FORMAT_GRP : file format group : %d" % file_format_grp
print
# Bit 14 is the copy flag and indicates whether the contents are original (0) or
# have been copied (1). It's a one time programmable bit (except ROM card).
copy = unstuff(csd,14,1)
print "COPY : copy flag : %d" % copy
print
# Bit 13 Permanently write protects the card. 0 = not permanently write protected
perm_write_protect = unstuff(csd,13,1)
print "PERM_WRITE_PROTECT : permanent write protection : %s (%d)" % (yesno(perm_write_protect),perm_write_protect)
print
    # Bit 12 temporarily write protects the card. 0 = not write protected, 1 = write protected
tmp_write_protect = unstuff(csd,12,1)
print "TMP_WRITE_PROTECT : temporary write protection : %s (%d)" % (yesno(tmp_write_protect),tmp_write_protect)
print
    # Bits 10-11 indicate the file format on the card
file_format = unstuff(csd,10,2)
file_format_value = "Reserved" if file_format_grp != 0 else {
0: "Hard disk-like file system with partition table",
1: "DOS FAT (floppy-like) with boot sector only (no partition table)",
2: "Universal File Format",
3: "Others/Unknown"
}[file_format]
print "FILE_FORMAT : file format : %d : %s" % (file_format, file_format_value)
print
# Bits 8-9 are reserved
# Bits 1-7 contain the CRC
crc = unstuff(csd,1,7)
print "CRC : CRC : %d" % crc
print
# Bit 0 is unused
# SCR : SD Card Configuration Register
print "------------------------------------------------------------"
scr = int(file(dev+"/scr").read(), 16)
print "SCR : SD Card Configuration Register : %x" % scr
print
# Bits 60-63 contain the scr structure version
scr_structure = unstuff(scr,60,4)
scr_structure_version = "SCR version 1.0" if scr_structure == 0 else "reserved"
print "SCR_STRUCTURE : SCR Structure Version : %d : %s" % (scr_structure, scr_structure_version)
print
# Bits 56 to 59 contain the SD Memory Card spec version
sd_spec = unstuff(scr,56,4)
sd_spec3 = unstuff(scr,47,1)
print "SD_SPEC : SD Memory Card - Spec. Version : %d" % sd_spec
print "SD_SPEC3 : Spec. Version 3.00 or higher : %d" % sd_spec3
sd_spec_version = {
0 : "Version 1.0 and 1.01",
1 : "Version 1.10",
2 : "Version 2.00",
3 : "Version 3.0X"
}[sd_spec+sd_spec3]
print "SD_SPEC: SD Memory Card - Spec. Version : %s" % sd_spec_version
print
# Bit 55 the data status after erase, either 0 or 1 (card vendor dependent)
data_stat_after_erase = unstuff(scr,55,1)
print "DATA_STAT_AFTER_ERASE : data status after erases : %d" % data_stat_after_erase
print
    # Bits 52-54 indicate the CPRM Security Specification Version for each capacity card.
sd_security = unstuff(scr,52,3)
sd_security_version = {
0 : "None",
1 : "Not Used",
2 : "SDSC Card (Security Version 1.01)",
3 : "SDHC Card (Security Version 2.00)",
4 : "SDXC Card (Security Version 3.xx)"
}[sd_security]
print "SD_SECURITY : CPRM Security Support : %d : %s" % (sd_security,sd_security_version)
print
# Bits 48 to 51 indicate the supported DAT bus widths
sd_bus_widths = unstuff(scr,48,4)
sd_bus_width_1bit = unstuff(scr,48,1)
sd_bus_width_4bit = unstuff(scr,50,1)
print "SD_BUS_WIDTHS : DAT Bus widths supported : %d" % sd_bus_widths
if (sd_bus_width_1bit == 1): print " : 1 bit (DAT0)"
if (sd_bus_width_4bit == 1): print " : 4 bit (DAT0-3)"
print
# Bit 47 read with SD_SPEC, above
    # Bits 43-46 indicate extended security
ex_security = unstuff(scr,43,4)
ex_security_supported = ("not supported","supported")[ex_security > 0]
print "EX_SECURITY : Extended Security Support : %d (%s)" % (ex_security,ex_security_supported)
print
# Bits 34 to 42 are reserved
# Bits 32-33 are command support bits
cmd_support = unstuff(scr,32,2)
cmd_support_scc = unstuff(scr,32,1)
cmd_support_sbc = unstuff(scr,33,1)
print "CMD_SUPPORT : Command Support bits : %d" % cmd_support
if cmd_support_sbc == 1: print " : Set Block Count (CMD23)"
if cmd_support_scc == 1: print " : Speed Class Control (CMD20)"
print
# Bits 0 to 31 are reserved
# Preferred Erase Size
print "------------------------------------------------------------"
pes = int(file(dev+"/preferred_erase_size").read())
print "Preferred Erase Size : %d" % pes
print " : %d MiB" % (pes >> 20)
print
# Erase Size : the minimum size, in bytes, of an erase operation.
# 512 if the card is block-addressed, 0 otherwise.
print "------------------------------------------------------------"
es = int(file(dev+"/erase_size").read())
print "Erase Size : %d KiB" % es
print
# Derived information follows
print "------------------------------------------------------------"
print "Derived Data"
print
print "LBA Sector Alignment"
print "--------------------"
print
sector_alignment_grain = pes/512
print "Align each partition with the preferred erase size"
print " %d / 512 = %d sectors" % (pes,sector_alignment_grain)
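    # e.g. a preferred erase size of 4 MiB gives 4194304 / 512 = 8192 sectors,
    # so each partition would start on a multiple of 8192 sectors
    # (illustrative value; the actual grain comes from the card queried above).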
sys.exit(main(sys.argv[0], sys.argv[1:]))
``` |
{
"source": "JohnLapis/algorithms",
"score": 4
} |
#### File: algorithms/sorting/heapsort.py
```python
from math import inf
# The indexing starts at 1
def max_heapify(heap, idx, heap_size=None):
if heap_size is None:
heap_size = len(heap)
if heap_size <= 1: return heap
max_idx = max(
# parent, left, right nodes
[idx, 2*idx, 2*idx + 1],
key=lambda i: heap[i-1] if i-1 < heap_size else -inf
)
if max_idx == idx:
return heap
else:
heap[idx-1], heap[max_idx-1] = heap[max_idx-1], heap[idx-1]
return max_heapify(heap, max_idx, heap_size)
def make_max_heap(array):
for i in range(len(array) // 2, 0, -1):
array = max_heapify(array, i)
return array
def heapsort(array):
heap = make_max_heap(array)
heap_size = len(heap)
for i in range(len(heap), 1, -1):
heap[i-1], heap[0] = heap[0], heap[i-1]
heap_size -= 1
heap = max_heapify(heap, 1, heap_size)
return heap
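# Illustrative usage (assumed example, not part of the original module):
if __name__ == "__main__":
    assert heapsort([5, 3, 8, 1, 9, 2]) == [1, 2, 3, 5, 8, 9]
    assert heapsort([]) == []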
```
#### File: algorithms/sorting/selection_sort.py
```python
def sort(array):
if len(array) <= 1: return array
minIdx, minNum = 0, array[0]
for i in range(len(array)):
if array[i] < minNum:
minIdx, minNum = i, array[i]
    if minIdx != 0:
        # move the old head element into the slot freed by the minimum
        array[minIdx] = array[0]
    sortedSubArray = sort(array[1:])
    sortedSubArray.insert(0, minNum)
    return sortedSubArray
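# Illustrative usage (assumed example, not part of the original module):
if __name__ == "__main__":
    assert sort([4, 2, 7, 1, 3]) == [1, 2, 3, 4, 7]
    assert sort([]) == []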
``` |
{
"source": "JohnLapis/cognosaurus",
"score": 2
} |
#### File: tests/performance/test_api_with_db.py
```python
import cProfile
import inspect
import os
import pytest
from rest_framework.test import APIRequestFactory
from cognosaurus.api.views import CognateViewSet
def test_cognate_viewset_in_loop():
rf = APIRequestFactory()
with open(os.path.dirname(__file__) + "/code_for_test_big_request.py", "r") as f:
code = f.read()
print()
cProfile.runctx(code, globals(), locals())
``` |
{
"source": "johnlarusic/lebus",
"score": 3
} |
#### File: lebus/lebus/schedule.py
```python
from . import *
import threading
import time
from datetime import datetime, timedelta
import urllib2
import json
class Schedule(object):
data_pulls = 0
def __init__(self, route, stop, label, dir, min_time, max_time, api_key):
self.lock = threading.Lock()
self.route = route
self.stop = stop
self.label = label
self.dir = dir
self.min_time = min_time
self.max_time = max_time
self.api_key = api_key
self.next = []
def update(self):
Schedule.data_pulls += 1
        self.log(
            "Pull updated data (pull #{} for day)".format(Schedule.data_pulls))
next_buses = self.get_next_times()
self.log("Waiting for lock")
self.lock.acquire()
try:
self.log("Acquired lock")
self.next = next_buses
finally:
self.log("Released lock")
self.lock.release()
def pull_data(self):
url_call = 'http://api.translink.ca/rttiapi/v1/stops/{}/estimates?routeNo={}&apikey={}&count=3'
url = url_call.format(self.stop, self.route, self.api_key)
self.log("Download data from {}".format(url))
try:
req = urllib2.Request(url, None, headers={
'Content-type': 'application/json', 'Accept': 'application/json'})
response = urllib2.urlopen(req)
json_list = json.loads(response.read())
if len(json_list) > 0:
return json_list[0]
except urllib2.HTTPError as ex:
self.log("Error pulling new bus data from URL '{}', error code '{}'".format(url, ex.code), True)
except Exception as ex:
self.log("Error pulling new bus data from URL '{}'".format(url), True)
return None
def get_next_times(self):
data = None
try:
data = self.pull_data()
except Exception as ex:
self.log("Error pulling new bus data for route '{}', stop '{}'".format(self.route, self.stop), True)
next_buses = list()
if data is not None:
for s in data['Schedules']:
try:
time_string = s['ExpectedLeaveTime']
if not s['CancelledStop']:
time_obj = self.parse_date_time(time_string)
self.log("Expected leave time: {} (parse of {})".format(
time_obj, time_string))
next_buses.append(time_obj)
except Exception as ex:
self.log("Error parsing bus data '{}'".format(
time_string), True)
return next_buses
def parse_date_time(self, date_string, add_leading_zero=False):
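        # Handles strings such as "10:22pm" (time only) or "10:22pm 2018-01-31"
        # (time followed by a date); the exact format is inferred from this
        # parser rather than from TransLink's API documentation.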
date = ""
time = date_string
if " " in date_string:
time, date = date_string.split(" ")
# Parse the time portion
hour, min_xm = time.split(":")
hour = int(hour)
minute = int(min_xm[:-2])
xm = min_xm[-2:]
if xm == "pm" and hour < 12:
hour += 12
elif xm == "am" and hour == 12:
hour = 0
        # Parse the date portion. Default to today's date, rolling over to
        # tomorrow (safely across month/year boundaries) if the hour has
        # already passed.
        now = datetime.now()
        if now.hour > hour:
            now = now + timedelta(days=1)
        year = now.year
        month = now.month
        day = now.day
if date != "":
year, month, day = date.split("-")
year = int(year)
month = int(month)
day = int(day)
dt = datetime(year, month, day, hour, minute)
return dt
def log(self, message, error_ind=False):
if error_ind:
LOG_HELPER.error("UPDATE %s/%s: %s",
self.route, self.stop, message)
else:
LOG_HELPER.debug("UPDATE %s/%s: %s",
self.route, self.stop, message)
``` |
{
"source": "JohnLaTwC/msticpy",
"score": 3
} |
#### File: msticpy/nbtools/pkg_config.py
```python
import os
import sys
import warnings
from pathlib import Path
from typing import Any, Dict, Optional
import pkg_resources
import yaml
from .._version import VERSION
__version__ = VERSION
__author__ = "<NAME>"
_CONFIG_FILE: str = "msticpyconfig.yaml"
_CONFIG_ENV_VAR: str = "MSTICPYCONFIG"
# pylint: disable=invalid-name
default_settings: Dict[str, Any] = {}
custom_settings: Dict[str, Any] = {}
settings: Dict[str, Any] = {}
def refresh_config():
"""Re-read the config settings."""
# pylint: disable=global-statement
global default_settings, custom_settings, settings
default_settings = _get_default_config()
custom_settings = _get_custom_config()
settings = _consolidate_configs(default_settings, custom_settings)
def _read_config_file(config_file: str) -> Dict[str, Any]:
"""
Read a yaml config definition file.
Parameters
----------
config_file : str
Path to yaml config file
Returns
-------
Dict
Configuration settings
"""
if Path(config_file).is_file():
with open(config_file) as f_handle:
# use safe_load instead of load
return yaml.safe_load(f_handle)
return {}
def _consolidate_configs(
def_config: Dict[str, Any], cust_config: Dict[str, Any]
) -> Dict[str, Any]:
resultant_config = {}
resultant_config.update(def_config)
_override_config(resultant_config, cust_config)
return resultant_config
def _override_config(base_config: Dict[str, Any], new_config: Dict[str, Any]):
for c_key, c_item in new_config.items():
if c_item is None:
continue
if isinstance(base_config.get(c_key), dict):
_override_config(base_config[c_key], new_config[c_key])
else:
base_config[c_key] = new_config[c_key]
def _get_default_config():
    # When called from a unit test, msticpy is a level above the package root,
    # so the first call produces an invalid path. Return the actual path -
    # pkgpath/msticpy/filename.yaml or just pkgpath/filename.yaml - testing
    # each candidate as we go.
conf_file = None
top_module = _get_top_module()
try:
conf_file = pkg_resources.resource_filename(top_module, _CONFIG_FILE)
if not Path(conf_file).is_file():
conf_file = pkg_resources.resource_filename(
top_module, "msticpy/" + _CONFIG_FILE
)
except ModuleNotFoundError:
pass
if not conf_file or not Path(conf_file).is_file():
# if all else fails we try to find the package default config somewhere
# in the package tree - we use the first one we find
pkg_paths = sys.modules[top_module]
if pkg_paths:
conf_file = next(Path(pkg_paths.__path__[0]).glob("**/" + _CONFIG_FILE))
if conf_file:
return _read_config_file(conf_file)
return {}
def _get_custom_config():
config_path = os.environ.get(_CONFIG_ENV_VAR, None)
if config_path and Path(config_path).is_file():
return _read_config_file(config_path)
if Path(_CONFIG_FILE).is_file():
return _read_config_file(_CONFIG_FILE)
return {}
def _get_top_module():
module_path = __name__.split(".")
top_module = __name__
for idx in range(1, len(module_path)):
test_module = ".".join(module_path[:-idx])
if test_module in sys.modules:
top_module = test_module
else:
break
return top_module
def get_settings(
conf_group: Optional[Dict[str, Any]], name_map: Optional[Dict[str, str]] = None
) -> Dict[Any, Any]:
"""
Lookup configuration values config, environment or KeyVault.
Parameters
----------
conf_group : Optional[Dict[str, Any]]
The configuration dictionary
name_map : Optional[Dict[str, str]], optional
Optional mapping to re-write setting names,
by default None
Returns
-------
Dict[Any, Any]
Dictionary of resolved settings
Raises
------
NotImplementedError
Keyvault storage is not yet implemented
"""
if not conf_group:
return {}
setting_dict: Dict[str, Any] = conf_group.copy()
for arg_name, arg_value in conf_group.items():
target_name = arg_name
if name_map:
target_name = name_map.get(target_name, target_name)
if isinstance(arg_value, str):
setting_dict[target_name] = arg_value
elif isinstance(arg_value, dict):
try:
setting_dict[target_name] = _fetch_setting(arg_value) # type: ignore
except NotImplementedError:
warnings.warn(
f"Setting type for setting {arg_value} not yet implemented. "
)
return setting_dict
def _fetch_setting(config_setting: Dict[str, Any]) -> Optional[str]:
"""Return required value for indirect settings (e.g. getting env var)."""
if "EnvironmentVar" in config_setting:
env_value = os.environ.get(config_setting["EnvironmentVar"])
if not env_value:
warnings.warn(
f"Environment variable {config_setting['EnvironmentVar']} "
+ " was not set"
)
return env_value
if "KeyVaultURI" in config_setting:
raise NotImplementedError("Keyvault support not yet implemented.")
return None
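# Example of an indirect setting as it might appear in a conf_group passed to
# get_settings (illustrative snippet, not shipped with the package):
#     {"AuthKey": {"EnvironmentVar": "MY_PROVIDER_KEY"}, "Url": "https://example.org"}
# resolves to {"AuthKey": os.environ["MY_PROVIDER_KEY"], "Url": "https://example.org"}.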
# read initial config when first imported.
refresh_config()
```
#### File: msticpy/tests/test_linuxsyslog.py
```python
import os
from pathlib import Path
import pandas as pd
import warnings
from ..msticpy.nbtools import pkg_config
from ..msticpy.nbtools.entityschema import Host
from ..msticpy.sectools import syslog_utils as ls
from ..msticpy.sectools import cmd_line as cl
from ..msticpy.sectools.provider_settings import get_provider_settings
from ..msticpy.sectools.geoip import GeoIPDatabaseException
_test_data_folders = [
d for d, _, _ in os.walk(os.getcwd()) if d.endswith("/tests/testdata")
]
if len(_test_data_folders) == 1:
_TEST_DATA = _test_data_folders[0]
else:
_TEST_DATA = "./tests/testdata"
def test_cluster_syslog_logons_df():
input_file = os.path.join(_TEST_DATA, "linux_logons.csv")
input_df = pd.read_csv(input_file, parse_dates=["TimeGenerated"])
output = ls.cluster_syslog_logons_df(input_df)
assert len(output.index) >= 1 # nosec
def test_host_data():
test_config1 = Path(_TEST_DATA).parent.joinpath("msticpyconfig-test.yaml")
os.environ[pkg_config._CONFIG_ENV_VAR] = str(test_config1)
with warnings.catch_warnings():
# We want to ignore warnings from missing config
warnings.simplefilter("ignore", category=UserWarning)
pkg_config.refresh_config()
syslog_file = os.path.join(_TEST_DATA, "syslog_data.csv")
syslog_df = pd.read_csv(syslog_file, parse_dates=["TimeGenerated"])
heartbeat_file = os.path.join(_TEST_DATA, "host_hb.csv")
heartbeat_df = pd.read_csv(heartbeat_file)
az_net_file = os.path.join(_TEST_DATA, "az_net.csv")
az_net_df = pd.read_csv(az_net_file)
try:
host_record = ls.create_host_record(syslog_df, heartbeat_df, az_net_df)
assert type(host_record) == Host # nosec
assert host_record.OSType == "Linux" # nosec
except GeoIPDatabaseException:
# test will fail if no GeoIP database exists or can be downloaded
other_provider_settings = get_provider_settings(
config_section="OtherProviders"
).get("GeoIPLite", {})
geolite_key = None
if other_provider_settings:
geolite_key = other_provider_settings.args.get("AuthKey")
if not geolite_key:
warnings.resetwarnings()
warnings.warn(
                message=(
                    "No configuration value found for GeoLite key. "
                    "Test test_host_data skipped."
                )
)
return
assert False
def test_cluster_sudo_sessions():
input_file = os.path.join(_TEST_DATA, "sudo_events.csv")
input_df = pd.read_csv(input_file, parse_dates=["TimeGenerated"])
output = ls.cluster_syslog_logons_df(input_df)
assert len(output.index) >= 1 # nosec
def test_risky_sudo_sessions():
input_file = os.path.join(_TEST_DATA, "sudo_session_test.csv")
sudo_events = pd.read_csv(input_file, parse_dates=["TimeGenerated"])
risky_actions = cl.risky_cmd_line(events=sudo_events, log_type="Syslog")
suspicious_events = cl.cmd_speed(
cmd_events=sudo_events, cmd_field="Command", time=60, events=2
)
sudo_sessions = ls.cluster_syslog_logons_df(logon_events=sudo_events)
output = ls.risky_sudo_sessions(
risky_actions=risky_actions,
suspicious_actions=suspicious_events,
sudo_sessions=sudo_sessions,
)
assert len(output) == 2 # nosec
assert type(output) == dict # nosec
``` |
{
"source": "JohnLauFoo/clc_packages_Yu",
"score": 2
} |
#### File: docs/sphinxext/configtraits.py
```python
def setup(app):
app.add_object_type('configtrait', 'configtrait', objname='Config option')
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
```
#### File: doc/sphinxext/custom_roles.py
```python
from docutils import nodes
from os.path import sep
from matplotlib import rcParamsDefault
def rcparam_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
rendered = nodes.Text(f'rcParams["{text}"]')
source = inliner.document.attributes['source'].replace(sep, '/')
rel_source = source.split('/doc/', 1)[1]
levels = rel_source.count('/')
refuri = ('../' * levels +
'tutorials/introductory/customizing.html' +
f"?highlight={text}#a-sample-matplotlibrc-file")
ref = nodes.reference(rawtext, rendered, refuri=refuri)
node_list = [nodes.literal('', '', ref)]
# The default backend would be printed as "agg", but that's not correct (as
# the default is actually determined by fallback).
if text in rcParamsDefault and text != "backend":
node_list.extend([
nodes.Text(' (default: '),
nodes.literal('', repr(rcParamsDefault[text])),
nodes.Text(')'),
])
return node_list, []
def setup(app):
app.add_role("rc", rcparam_role)
return {"parallel_read_safe": True, "parallel_write_safe": True}
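# In the documentation's reST sources the role is then used as, for example,
# ``:rc:`figure.dpi```, which renders as ``rcParams["figure.dpi"]`` linked to
# the customizing tutorial together with its default value (illustrative use).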
```
#### File: doc/sphinxext/redirect_from.py
```python
from pathlib import Path
from docutils.parsers.rst import Directive
from sphinx.domains import Domain
from sphinx.util import logging
logger = logging.getLogger(__name__)
HTML_TEMPLATE = """<html>
<head>
<meta http-equiv="refresh" content="0; url={v}">
</head>
</html>
"""
def setup(app):
RedirectFrom.app = app
app.add_directive("redirect-from", RedirectFrom)
app.add_domain(RedirectFromDomain)
app.connect("build-finished", _generate_redirects)
metadata = {'parallel_read_safe': True}
return metadata
class RedirectFromDomain(Domain):
"""
The sole purpose of this domain is a parallel_read_safe data store for the
redirects mapping.
"""
name = 'redirect_from'
label = 'redirect_from'
@property
    def redirects(self):
        """The mapping of the redirects."""
return self.data.setdefault('redirects', {})
def clear_doc(self, docnames):
self.redirects.clear()
def merge_domaindata(self, docnames, otherdata):
for src, dst in otherdata['redirects'].items():
if src not in self.redirects:
self.redirects[src] = dst
elif self.redirects[src] != dst:
raise ValueError(
f"Inconsistent redirections from {src} to "
                    f"{self.redirects[src]} and {otherdata['redirects'][src]}")
class RedirectFrom(Directive):
required_arguments = 1
def run(self):
redirected_doc, = self.arguments
env = self.app.env
builder = self.app.builder
domain = env.get_domain('redirect_from')
current_doc = env.path2doc(self.state.document.current_source)
redirected_reldoc, _ = env.relfn2path(redirected_doc, current_doc)
if redirected_reldoc in domain.redirects:
raise ValueError(
f"{redirected_reldoc} is already noted as redirecting to "
f"{domain.redirects[redirected_reldoc]}")
domain.redirects[redirected_reldoc] = current_doc
return []
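# Typical use near the top of a moved .rst document (illustrative):
#
#     .. redirect-from:: /gallery/old_page_name
#
# At the end of an HTML build, _generate_redirects then writes
# gallery/old_page_name.html containing only a meta-refresh that points at the
# document declaring the directive.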
def _generate_redirects(app, exception):
builder = app.builder
if builder.name != "html" or exception:
return
for k, v in app.env.get_domain('redirect_from').redirects.items():
p = Path(app.outdir, k + builder.out_suffix)
html = HTML_TEMPLATE.format(v=builder.get_relative_uri(k, v))
if p.is_file():
if p.read_text() != html:
logger.warning(f'A redirect-from directive is trying to '
f'create {p}, but that file already exists '
f'(perhaps you need to run "make clean")')
else:
logger.info(f'making refresh html file: {k} redirect to {v}')
p.parent.mkdir(parents=True, exist_ok=True)
p.write_text(html)
```
#### File: examples/axes_grid1/demo_axes_hbox_divider.py
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_divider import HBoxDivider
import mpl_toolkits.axes_grid1.axes_size as Size
def make_heights_equal(fig, rect, ax1, ax2, pad):
# pad in inches
divider = HBoxDivider(
fig, rect,
horizontal=[Size.AxesX(ax1), Size.Fixed(pad), Size.AxesX(ax2)],
vertical=[Size.AxesY(ax1), Size.Scaled(1), Size.AxesY(ax2)])
ax1.set_axes_locator(divider.new_locator(0))
ax2.set_axes_locator(divider.new_locator(2))
if __name__ == "__main__":
arr1 = np.arange(20).reshape((4, 5))
arr2 = np.arange(20).reshape((5, 4))
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(arr1)
ax2.imshow(arr2)
make_heights_equal(fig, 111, ax1, ax2, pad=0.5)
fig.text(.5, .5,
"Both axes' location are adjusted\n"
"so that they have equal heights\n"
"while maintaining their aspect ratios",
va="center", ha="center",
bbox=dict(boxstyle="round, pad=1", facecolor="w"))
plt.show()
```
#### File: examples/axisartist/demo_axis_direction.py
```python
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.axisartist.angle_helper as angle_helper
import mpl_toolkits.axisartist.grid_finder as grid_finder
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
import mpl_toolkits.axisartist as axisartist
from mpl_toolkits.axisartist.grid_helper_curvelinear import \
GridHelperCurveLinear
def setup_axes(fig, rect):
"""Polar projection, but in a rectangular box."""
# see demo_curvelinear_grid.py for details
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle=360,
lat_cycle=None,
lon_minmax=None,
lat_minmax=(0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
grid_locator2 = grid_finder.MaxNLocator(5)
tick_formatter1 = angle_helper.FormatterDMS()
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
grid_locator2=grid_locator2,
tick_formatter1=tick_formatter1
)
ax1 = fig.add_subplot(
rect, axes_class=axisartist.Axes, grid_helper=grid_helper)
ax1.axis[:].toggle(ticklabels=False)
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
return ax1
def add_floating_axis1(ax1):
ax1.axis["lat"] = axis = ax1.new_floating_axis(0, 30)
axis.label.set_text(r"$\theta = 30^{\circ}$")
axis.label.set_visible(True)
return axis
def add_floating_axis2(ax1):
ax1.axis["lon"] = axis = ax1.new_floating_axis(1, 6)
axis.label.set_text(r"$r = 6$")
axis.label.set_visible(True)
return axis
fig = plt.figure(figsize=(8, 4))
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.99,
wspace=0.01, hspace=0.01)
for i, d in enumerate(["bottom", "left", "top", "right"]):
    ax1 = setup_axes(fig, rect=241 + i)
axis = add_floating_axis1(ax1)
axis.set_axis_direction(d)
ax1.annotate(d, (0, 1), (5, -5),
xycoords="axes fraction", textcoords="offset points",
va="top", ha="left")
for i, d in enumerate(["bottom", "left", "top", "right"]):
    ax1 = setup_axes(fig, rect=245 + i)
axis = add_floating_axis2(ax1)
axis.set_axis_direction(d)
ax1.annotate(d, (0, 1), (5, -5),
xycoords="axes fraction", textcoords="offset points",
va="top", ha="left")
plt.show()
```
#### File: examples/event_handling/path_editor.py
```python
import numpy as np
from matplotlib.backend_bases import MouseButton
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
pathdata = [
(Path.MOVETO, (1.58, -2.57)),
(Path.CURVE4, (0.35, -1.1)),
(Path.CURVE4, (-1.75, 2.0)),
(Path.CURVE4, (0.375, 2.0)),
(Path.LINETO, (0.85, 1.15)),
(Path.CURVE4, (2.2, 3.2)),
(Path.CURVE4, (3, 0.05)),
(Path.CURVE4, (2.0, -0.5)),
(Path.CLOSEPOLY, (1.58, -2.57)),
]
codes, verts = zip(*pathdata)
path = Path(verts, codes)
patch = PathPatch(
path, facecolor='green', edgecolor='yellow', alpha=0.5)
ax.add_patch(patch)
class PathInteractor:
"""
    A path editor.
Press 't' to toggle vertex markers on and off. When vertex markers are on,
they can be dragged with the mouse.
"""
showverts = True
epsilon = 5 # max pixel distance to count as a vertex hit
def __init__(self, pathpatch):
self.ax = pathpatch.axes
canvas = self.ax.figure.canvas
self.pathpatch = pathpatch
self.pathpatch.set_animated(True)
x, y = zip(*self.pathpatch.get_path().vertices)
self.line, = ax.plot(
x, y, marker='o', markerfacecolor='r', animated=True)
self._ind = None # the active vertex
canvas.mpl_connect('draw_event', self.on_draw)
canvas.mpl_connect('button_press_event', self.on_button_press)
canvas.mpl_connect('key_press_event', self.on_key_press)
canvas.mpl_connect('button_release_event', self.on_button_release)
canvas.mpl_connect('motion_notify_event', self.on_mouse_move)
self.canvas = canvas
def get_ind_under_point(self, event):
"""
Return the index of the point closest to the event position or *None*
if no point is within ``self.epsilon`` to the event position.
"""
# display coords
xy = np.asarray(self.pathpatch.get_path().vertices)
xyt = self.pathpatch.get_transform().transform(xy)
xt, yt = xyt[:, 0], xyt[:, 1]
d = np.sqrt((xt - event.x)**2 + (yt - event.y)**2)
ind = d.argmin()
if d[ind] >= self.epsilon:
ind = None
return ind
def on_draw(self, event):
"""Callback for draws."""
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.ax.draw_artist(self.pathpatch)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
def on_button_press(self, event):
"""Callback for mouse button presses."""
if (event.inaxes is None
or event.button != MouseButton.LEFT
or not self.showverts):
return
self._ind = self.get_ind_under_point(event)
def on_button_release(self, event):
"""Callback for mouse button releases."""
if (event.button != MouseButton.LEFT
or not self.showverts):
return
self._ind = None
def on_key_press(self, event):
"""Callback for key presses."""
if not event.inaxes:
return
if event.key == 't':
self.showverts = not self.showverts
self.line.set_visible(self.showverts)
if not self.showverts:
self._ind = None
self.canvas.draw()
def on_mouse_move(self, event):
"""Callback for mouse movements."""
if (self._ind is None
or event.inaxes is None
or event.button != MouseButton.LEFT
or not self.showverts):
return
vertices = self.pathpatch.get_path().vertices
vertices[self._ind] = event.xdata, event.ydata
self.line.set_data(zip(*vertices))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.pathpatch)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
interactor = PathInteractor(patch)
ax.set_title('drag vertices to update path')
ax.set_xlim(-3, 4)
ax.set_ylim(-3, 4)
plt.show()
```
#### File: examples/images_contours_and_fields/image_transparency_blend.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
def normal_pdf(x, mean, var):
return np.exp(-(x - mean)**2 / (2*var))
# Generate the space in which the blobs will live
xmin, xmax, ymin, ymax = (0, 100, 0, 100)
n_bins = 100
xx = np.linspace(xmin, xmax, n_bins)
yy = np.linspace(ymin, ymax, n_bins)
# Generate the blobs. The range of the values is roughly -1 to 1
means_high = [20, 50]
means_low = [50, 60]
var = [150, 200]
gauss_x_high = normal_pdf(xx, means_high[0], var[0])
gauss_y_high = normal_pdf(yy, means_high[1], var[0])
gauss_x_low = normal_pdf(xx, means_low[0], var[1])
gauss_y_low = normal_pdf(yy, means_low[1], var[1])
weights = (np.outer(gauss_y_high, gauss_x_high)
- np.outer(gauss_y_low, gauss_x_low))
# We'll also create a grey background into which the pixels will fade
greys = np.full((*weights.shape, 3), 70, dtype=np.uint8)
# First we'll plot these blobs using ``imshow`` without transparency.
vmax = np.abs(weights).max()
imshow_kwargs = {
'vmax': vmax,
'vmin': -vmax,
'cmap': 'RdYlBu',
'extent': (xmin, xmax, ymin, ymax),
}
fig, ax = plt.subplots()
ax.imshow(greys)
ax.imshow(weights, **imshow_kwargs)
ax.set_axis_off()
###############################################################################
# Blending in transparency
# ========================
#
# The simplest way to include transparency when plotting data with
# `matplotlib.pyplot.imshow` is to pass an array matching the shape of
# the data to the ``alpha`` argument. For example, we'll create a gradient
# moving from left to right below.
# Create an alpha channel of linearly increasing values moving to the right.
alphas = np.ones(weights.shape)
alphas[:, 30:] = np.linspace(1, 0, 70)
# Create the figure and image
# Note that the absolute values may be slightly different
fig, ax = plt.subplots()
ax.imshow(greys)
ax.imshow(weights, alpha=alphas, **imshow_kwargs)
ax.set_axis_off()
###############################################################################
# Using transparency to highlight values with high amplitude
# ==========================================================
#
# Finally, we'll recreate the same plot, but this time we'll use transparency
# to highlight the extreme values in the data. This is often used to highlight
# data points with smaller p-values. We'll also add in contour lines to
# highlight the image values.
# Create an alpha channel based on weight values
# Any value whose absolute value is > .3 will have zero transparency
alphas = Normalize(0, .3, clip=True)(np.abs(weights))
alphas = np.clip(alphas, .4, 1) # alpha value clipped at the bottom at .4
# Create the figure and image
# Note that the absolute values may be slightly different
fig, ax = plt.subplots()
ax.imshow(greys)
ax.imshow(weights, alpha=alphas, **imshow_kwargs)
# Add contour lines to further highlight different levels.
ax.contour(weights[::-1], levels=[-.1, .1], colors='k', linestyles='-')
ax.set_axis_off()
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.imshow` / `matplotlib.pyplot.imshow`
# - `matplotlib.axes.Axes.contour` / `matplotlib.pyplot.contour`
# - `matplotlib.colors.Normalize`
# - `matplotlib.axes.Axes.set_axis_off`
```
#### File: examples/lines_bars_and_markers/filled_step.py
```python
import itertools
from collections import OrderedDict
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from cycler import cycler
def filled_hist(ax, edges, values, bottoms=None, orientation='v',
**kwargs):
"""
Draw a histogram as a stepped patch.
Extra kwargs are passed through to `fill_between`
Parameters
----------
ax : Axes
The axes to plot to
edges : array
A length n+1 array giving the left edges of each bin and the
right edge of the last bin.
values : array
A length n array of bin counts or values
bottoms : float or array, optional
A length n array of the bottom of the bars. If None, zero is used.
orientation : {'v', 'h'}
Orientation of the histogram. 'v' (default) has
the bars increasing in the positive y-direction.
Returns
-------
ret : PolyCollection
Artist added to the Axes
"""
print(orientation)
if orientation not in 'hv':
raise ValueError("orientation must be in {{'h', 'v'}} "
"not {o}".format(o=orientation))
kwargs.setdefault('step', 'post')
edges = np.asarray(edges)
values = np.asarray(values)
if len(edges) - 1 != len(values):
raise ValueError('Must provide one more bin edge than value not: '
'len(edges): {lb} len(values): {lv}'.format(
lb=len(edges), lv=len(values)))
if bottoms is None:
bottoms = 0
bottoms = np.broadcast_to(bottoms, values.shape)
values = np.append(values, values[-1])
bottoms = np.append(bottoms, bottoms[-1])
if orientation == 'h':
return ax.fill_betweenx(edges, values, bottoms,
**kwargs)
elif orientation == 'v':
return ax.fill_between(edges, values, bottoms,
**kwargs)
else:
raise AssertionError("you should never be here")
def stack_hist(ax, stacked_data, sty_cycle, bottoms=None,
hist_func=None, labels=None,
plot_func=None, plot_kwargs=None):
"""
Parameters
----------
ax : axes.Axes
        The axes to add artists to
stacked_data : array or Mapping
A (N, M) shaped array. The first dimension will be iterated over to
compute histograms row-wise
    sty_cycle : Cycler or iterable of dict
Style to apply to each set
bottoms : array, default: 0
The initial positions of the bottoms.
hist_func : callable, optional
Must have signature `bin_vals, bin_edges = f(data)`.
`bin_edges` expected to be one longer than `bin_vals`
labels : list of str, optional
The label for each set.
        If not given and stacked_data is an array, defaults to 'default set {n}'.
        If stacked_data is a mapping and labels is None, defaults to the keys
        (which may come out in a random order).
        If stacked_data is a mapping and labels is given, then only
        the columns listed will be plotted.
plot_func : callable, optional
Function to call to draw the histogram must have signature:
ret = plot_func(ax, edges, top, bottoms=bottoms,
label=label, **kwargs)
plot_kwargs : dict, optional
Any extra kwargs to pass through to the plotting function. This
will be the same for all calls to the plotting function and will
over-ride the values in cycle.
Returns
-------
arts : dict
Dictionary of artists keyed on their labels
"""
# deal with default binning function
if hist_func is None:
hist_func = np.histogram
# deal with default plotting function
if plot_func is None:
plot_func = filled_hist
# deal with default
if plot_kwargs is None:
plot_kwargs = {}
print(plot_kwargs)
try:
l_keys = stacked_data.keys()
label_data = True
if labels is None:
labels = l_keys
except AttributeError:
label_data = False
if labels is None:
labels = itertools.repeat(None)
if label_data:
loop_iter = enumerate((stacked_data[lab], lab, s)
for lab, s in zip(labels, sty_cycle))
else:
loop_iter = enumerate(zip(stacked_data, labels, sty_cycle))
arts = {}
for j, (data, label, sty) in loop_iter:
if label is None:
label = 'dflt set {n}'.format(n=j)
label = sty.pop('label', label)
vals, edges = hist_func(data)
if bottoms is None:
bottoms = np.zeros_like(vals)
top = bottoms + vals
print(sty)
sty.update(plot_kwargs)
print(sty)
ret = plot_func(ax, edges, top, bottoms=bottoms,
label=label, **sty)
bottoms = top
arts[label] = ret
ax.legend(fontsize=10)
return arts
# set up histogram function to fixed bins
edges = np.linspace(-3, 3, 20, endpoint=True)
hist_func = partial(np.histogram, bins=edges)
# set up style cycles
color_cycle = cycler(facecolor=plt.rcParams['axes.prop_cycle'][:4])
label_cycle = cycler(label=['set {n}'.format(n=n) for n in range(4)])
hatch_cycle = cycler(hatch=['/', '*', '+', '|'])
# Fixing random state for reproducibility
np.random.seed(19680801)
stack_data = np.random.randn(4, 12250)
dict_data = OrderedDict(zip((c['label'] for c in label_cycle), stack_data))
###############################################################################
# Work with plain arrays
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5), tight_layout=True)
arts = stack_hist(ax1, stack_data, color_cycle + label_cycle + hatch_cycle,
hist_func=hist_func)
arts = stack_hist(ax2, stack_data, color_cycle,
hist_func=hist_func,
plot_kwargs=dict(edgecolor='w', orientation='h'))
ax1.set_ylabel('counts')
ax1.set_xlabel('x')
ax2.set_xlabel('counts')
ax2.set_ylabel('x')
###############################################################################
# Work with labeled data
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 4.5),
tight_layout=True, sharey=True)
arts = stack_hist(ax1, dict_data, color_cycle + hatch_cycle,
hist_func=hist_func)
arts = stack_hist(ax2, dict_data, color_cycle + hatch_cycle,
hist_func=hist_func, labels=['set 0', 'set 3'])
ax1.xaxis.set_major_locator(mticker.MaxNLocator(5))
ax1.set_xlabel('counts')
ax1.set_ylabel('x')
ax2.set_ylabel('x')
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.fill_betweenx` / `matplotlib.pyplot.fill_betweenx`
# - `matplotlib.axes.Axes.fill_between` / `matplotlib.pyplot.fill_between`
# - `matplotlib.axis.Axis.set_major_locator`
```
#### File: examples/lines_bars_and_markers/hat_graph.py
```python
import numpy as np
import matplotlib.pyplot as plt
def hat_graph(ax, xlabels, values, group_labels):
"""
Create a hat graph.
Parameters
----------
ax : matplotlib.axes.Axes
The Axes to plot into.
xlabels : list of str
The category names to be displayed on the x-axis.
values : (M, N) array-like
The data values.
Rows are the groups (len(group_labels) == M).
Columns are the categories (len(xlabels) == N).
group_labels : list of str
The group labels displayed in the legend.
"""
def label_bars(heights, rects):
"""Attach a text label on top of each bar."""
for height, rect in zip(heights, rects):
ax.annotate(f'{height}',
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 4), # 4 points vertical offset.
textcoords='offset points',
ha='center', va='bottom')
values = np.asarray(values)
x = np.arange(values.shape[1])
ax.set_xticks(x)
ax.set_xticklabels(xlabels)
spacing = 0.3 # spacing between hat groups
width = (1 - spacing) / values.shape[0]
heights0 = values[0]
for i, (heights, group_label) in enumerate(zip(values, group_labels)):
style = {'fill': False} if i == 0 else {'edgecolor': 'black'}
rects = ax.bar(x - spacing/2 + i * width, heights - heights0,
width, bottom=heights0, label=group_label, **style)
label_bars(heights, rects)
# initialise labels and a numpy array; make sure you have
# N labels for the N values in each row of the array
xlabels = ['I', 'II', 'III', 'IV', 'V']
playerA = np.array([5, 15, 22, 20, 25])
playerB = np.array([25, 32, 34, 30, 27])
fig, ax = plt.subplots()
hat_graph(ax, xlabels, [playerA, playerB], ['Player A', 'Player B'])
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_xlabel('Games')
ax.set_ylabel('Score')
ax.set_ylim(0, 60)
ax.set_title('Scores by number of game and players')
ax.legend()
fig.tight_layout()
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.bar` / `matplotlib.pyplot.bar`
# - `matplotlib.axes.Axes.annotate` / `matplotlib.pyplot.annotate`
```
#### File: examples/lines_bars_and_markers/marker_reference.py
```python
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
text_style = dict(horizontalalignment='right', verticalalignment='center',
fontsize=12, fontfamily='monospace')
marker_style = dict(linestyle=':', color='0.8', markersize=10,
markerfacecolor="tab:blue", markeredgecolor="tab:blue")
def format_axes(ax):
ax.margins(0.2)
ax.set_axis_off()
ax.invert_yaxis()
def split_list(a_list):
i_half = len(a_list) // 2
return a_list[:i_half], a_list[i_half:]
###############################################################################
# Unfilled markers
# ================
# Unfilled markers are single-colored.
fig, axs = plt.subplots(ncols=2)
fig.suptitle('Un-filled markers', fontsize=14)
# Filter out filled markers and marker settings that do nothing.
unfilled_markers = [m for m, func in Line2D.markers.items()
if func != 'nothing' and m not in Line2D.filled_markers]
for ax, markers in zip(axs, split_list(unfilled_markers)):
for y, marker in enumerate(markers):
ax.text(-0.5, y, repr(marker), **text_style)
ax.plot([y] * 3, marker=marker, **marker_style)
format_axes(ax)
plt.show()
###############################################################################
# Filled markers
# ==============
fig, axs = plt.subplots(ncols=2)
fig.suptitle('Filled markers', fontsize=14)
for ax, markers in zip(axs, split_list(Line2D.filled_markers)):
for y, marker in enumerate(markers):
ax.text(-0.5, y, repr(marker), **text_style)
ax.plot([y] * 3, marker=marker, **marker_style)
format_axes(ax)
plt.show()
###############################################################################
# .. _marker_fill_styles:
#
# Marker fill styles
# ------------------
# The edge color and fill color of filled markers can be specified separately.
# Additionally, the ``fillstyle`` can be configured to be unfilled, fully
# filled, or half-filled in various directions. The half-filled styles use
# ``markerfacecoloralt`` as secondary fill color.
fig, ax = plt.subplots()
fig.suptitle('Marker fillstyle', fontsize=14)
fig.subplots_adjust(left=0.4)
filled_marker_style = dict(marker='o', linestyle=':', markersize=15,
color='darkgrey',
markerfacecolor='tab:blue',
markerfacecoloralt='lightsteelblue',
markeredgecolor='brown')
for y, fill_style in enumerate(Line2D.fillStyles):
ax.text(-0.5, y, repr(fill_style), **text_style)
ax.plot([y] * 3, fillstyle=fill_style, **filled_marker_style)
format_axes(ax)
plt.show()
###############################################################################
# Markers created from TeX symbols
# ================================
#
# Use :doc:`MathText </tutorials/text/mathtext>` to create custom marker
# symbols, e.g. ``"$\u266B$"``. For an overview of the STIX font symbols refer
# to the `STIX font table <http://www.stixfonts.org/allGlyphs.html>`_.
# Also see the :doc:`/gallery/text_labels_and_annotations/stix_fonts_demo`.
fig, ax = plt.subplots()
fig.suptitle('Mathtext markers', fontsize=14)
fig.subplots_adjust(left=0.4)
marker_style.update(markeredgecolor="None", markersize=15)
markers = ["$1$", r"$\frac{1}{2}$", "$f$", "$\u266B$", r"$\mathcal{A}$"]
for y, marker in enumerate(markers):
# Escape dollars so that the text is written "as is", not as mathtext.
ax.text(-0.5, y, repr(marker).replace("$", r"\$"), **text_style)
ax.plot([y] * 3, marker=marker, **marker_style)
format_axes(ax)
plt.show()
```
#### File: examples/mplot3d/pathpatch3d.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, PathPatch
from matplotlib.text import TextPath
from matplotlib.transforms import Affine2D
import mpl_toolkits.mplot3d.art3d as art3d
def text3d(ax, xyz, s, zdir="z", size=None, angle=0, usetex=False, **kwargs):
"""
Plots the string *s* on the axes *ax*, with position *xyz*, size *size*,
and rotation angle *angle*. *zdir* gives the axis which is to be treated as
the third dimension. *usetex* is a boolean indicating whether the string
    should be run through a LaTeX subprocess or not. Any additional keyword
    arguments are forwarded to `~matplotlib.patches.PathPatch`.
Note: zdir affects the interpretation of xyz.
"""
x, y, z = xyz
if zdir == "y":
xy1, z1 = (x, z), y
elif zdir == "x":
xy1, z1 = (y, z), x
else:
xy1, z1 = (x, y), z
text_path = TextPath((0, 0), s, size=size, usetex=usetex)
trans = Affine2D().rotate(angle).translate(xy1[0], xy1[1])
p1 = PathPatch(trans.transform_path(text_path), **kwargs)
ax.add_patch(p1)
art3d.pathpatch_2d_to_3d(p1, z=z1, zdir=zdir)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# Draw a circle on the x=0 'wall'
p = Circle((5, 5), 3)
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=0, zdir="x")
# Manually label the axes
text3d(ax, (4, -2, 0), "X-axis", zdir="z", size=.5, usetex=False,
ec="none", fc="k")
text3d(ax, (12, 4, 0), "Y-axis", zdir="z", size=.5, usetex=False,
angle=np.pi / 2, ec="none", fc="k")
text3d(ax, (12, 10, 4), "Z-axis", zdir="y", size=.5, usetex=False,
angle=np.pi / 2, ec="none", fc="k")
# Write a LaTeX formula on the z=0 'floor'
text3d(ax, (1, 5, 0),
r"$\displaystyle G_{\mu\nu} + \Lambda g_{\mu\nu} = "
r"\frac{8\pi G}{c^4} T_{\mu\nu} $",
zdir="z", size=1, usetex=True,
ec="none", fc="k")
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
ax.set_zlim(0, 10)
plt.show()
```
#### File: examples/pyplots/whats_new_99_axes_grid.py
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.axes_rgb import RGBAxes
def get_demo_image():
# prepare image
delta = 0.5
extent = (-3, 4, -4, 3)
x = np.arange(-3.0, 4.001, delta)
y = np.arange(-4.0, 3.001, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
Z = (Z1 - Z2) * 2
return Z, extent
def get_rgb():
Z, extent = get_demo_image()
Z[Z < 0] = 0.
Z = Z / Z.max()
R = Z[:13, :13]
G = Z[2:, 2:]
B = Z[:13, 2:]
return R, G, B
fig = plt.figure()
ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8])
r, g, b = get_rgb()
ax.imshow_rgb(r, g, b, origin="lower")
ax.RGB.set_xlim(0., 9.5)
ax.RGB.set_ylim(0.9, 10.6)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `mpl_toolkits.axes_grid1.axes_rgb.RGBAxes`
# - `mpl_toolkits.axes_grid1.axes_rgb.RGBAxes.imshow_rgb`
```
#### File: examples/statistics/confidence_ellipse.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
#############################################################################
#
# The plotting function itself
# """"""""""""""""""""""""""""
#
# This function plots the confidence ellipse of the covariance of the given
# array-like variables x and y. The ellipse is plotted into the given
# axes-object ax.
#
# The radii of the ellipse can be controlled by n_std which is the number
# of standard deviations. The default value is 3 which makes the ellipse
# enclose 98.9% of the points if the data is normally distributed
# like in these examples (3 standard deviations in 1-D contain 99.7%
# of the data, which is 98.9% of the data in 2-D).
def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):
"""
Create a plot of the covariance confidence ellipse of *x* and *y*.
Parameters
----------
x, y : array-like, shape (n, )
Input data.
ax : matplotlib.axes.Axes
The axes object to draw the ellipse into.
n_std : float
        The number of standard deviations to determine the ellipse's radii.
**kwargs
Forwarded to `~matplotlib.patches.Ellipse`
Returns
-------
matplotlib.patches.Ellipse
"""
if x.size != y.size:
raise ValueError("x and y must be the same size")
cov = np.cov(x, y)
pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])
    # Using a special case to obtain the eigenvalues of this
    # two-dimensional dataset.
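    # For the correlation matrix [[1, p], [p, 1]] the eigenvalues are 1 + p and
    # 1 - p, with eigenvectors along the +/-45 degree diagonals, so the unit
    # ellipse below has semi-axes sqrt(1 + p) and sqrt(1 - p) before it is
    # rotated by 45 degrees and rescaled by the marginal standard deviations.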
ell_radius_x = np.sqrt(1 + pearson)
ell_radius_y = np.sqrt(1 - pearson)
ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,
facecolor=facecolor, **kwargs)
    # Calculating the standard deviation of x from
    # the square root of the variance and multiplying
    # by the given number of standard deviations.
scale_x = np.sqrt(cov[0, 0]) * n_std
mean_x = np.mean(x)
    # calculating the standard deviation of y ...
scale_y = np.sqrt(cov[1, 1]) * n_std
mean_y = np.mean(y)
transf = transforms.Affine2D() \
.rotate_deg(45) \
.scale(scale_x, scale_y) \
.translate(mean_x, mean_y)
ellipse.set_transform(transf + ax.transData)
return ax.add_patch(ellipse)
#############################################################################
#
# A helper function to create a correlated dataset
# """"""""""""""""""""""""""""""""""""""""""""""""
#
# Creates a random two-dimensional dataset with the specified
# two-dimensional mean (mu) and dimensions (scale).
# The correlation can be controlled by the param 'dependency',
# a 2x2 matrix.
def get_correlated_dataset(n, dependency, mu, scale):
latent = np.random.randn(n, 2)
dependent = latent.dot(dependency)
scaled = dependent * scale
scaled_with_offset = scaled + mu
# return x and y of the new, correlated dataset
return scaled_with_offset[:, 0], scaled_with_offset[:, 1]
#############################################################################
#
# Positive, negative and weak correlation
# """""""""""""""""""""""""""""""""""""""
#
# Note that the shape for the weak correlation (right) is an ellipse,
# not a circle because x and y are differently scaled.
# However, the fact that x and y are uncorrelated is shown by
# the axes of the ellipse being aligned with the x- and y-axis
# of the coordinate system.
np.random.seed(0)
PARAMETERS = {
'Positive correlation': [[0.85, 0.35],
[0.15, -0.65]],
'Negative correlation': [[0.9, -0.4],
[0.1, -0.6]],
'Weak correlation': [[1, 0],
[0, 1]],
}
mu = 2, 4
scale = 3, 5
fig, axs = plt.subplots(1, 3, figsize=(9, 3))
for ax, (title, dependency) in zip(axs, PARAMETERS.items()):
x, y = get_correlated_dataset(800, dependency, mu, scale)
ax.scatter(x, y, s=0.5)
ax.axvline(c='grey', lw=1)
ax.axhline(c='grey', lw=1)
confidence_ellipse(x, y, ax, edgecolor='red')
ax.scatter(mu[0], mu[1], c='red', s=3)
ax.set_title(title)
plt.show()
#############################################################################
#
# Different number of standard deviations
# """""""""""""""""""""""""""""""""""""""
#
# A plot with n_std = 3 (blue), 2 (purple) and 1 (red)
fig, ax_nstd = plt.subplots(figsize=(6, 6))
dependency_nstd = [[0.8, 0.75],
[-0.2, 0.35]]
mu = 0, 0
scale = 8, 5
ax_nstd.axvline(c='grey', lw=1)
ax_nstd.axhline(c='grey', lw=1)
x, y = get_correlated_dataset(500, dependency_nstd, mu, scale)
ax_nstd.scatter(x, y, s=0.5)
confidence_ellipse(x, y, ax_nstd, n_std=1,
label=r'$1\sigma$', edgecolor='firebrick')
confidence_ellipse(x, y, ax_nstd, n_std=2,
label=r'$2\sigma$', edgecolor='fuchsia', linestyle='--')
confidence_ellipse(x, y, ax_nstd, n_std=3,
label=r'$3\sigma$', edgecolor='blue', linestyle=':')
ax_nstd.scatter(mu[0], mu[1], c='red', s=3)
ax_nstd.set_title('Different standard deviations')
ax_nstd.legend()
plt.show()
#############################################################################
#
# Using the keyword arguments
# """""""""""""""""""""""""""
#
# Use the kwargs specified for matplotlib.patches.Patch in order
# to have the ellipse rendered in different ways.
fig, ax_kwargs = plt.subplots(figsize=(6, 6))
dependency_kwargs = [[-0.8, 0.5],
[-0.2, 0.5]]
mu = 2, -3
scale = 6, 5
ax_kwargs.axvline(c='grey', lw=1)
ax_kwargs.axhline(c='grey', lw=1)
x, y = get_correlated_dataset(500, dependency_kwargs, mu, scale)
# Plot the ellipse with zorder=0 in order to demonstrate
# its transparency (caused by the use of alpha).
confidence_ellipse(x, y, ax_kwargs,
alpha=0.5, facecolor='pink', edgecolor='purple', zorder=0)
ax_kwargs.scatter(x, y, s=0.5)
ax_kwargs.scatter(mu[0], mu[1], c='red', s=3)
ax_kwargs.set_title('Using kwargs')
fig.subplots_adjust(hspace=0.25)
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.transforms.Affine2D`
# - `matplotlib.patches.Ellipse`
```
#### File: examples/subplots_axes_and_figures/secondary_axis.py
```python
import matplotlib.pyplot as plt
import numpy as np
import datetime
import matplotlib.dates as mdates
from matplotlib.ticker import AutoMinorLocator
fig, ax = plt.subplots(constrained_layout=True)
x = np.arange(0, 360, 1)
y = np.sin(2 * x * np.pi / 180)
ax.plot(x, y)
ax.set_xlabel('angle [degrees]')
ax.set_ylabel('signal')
ax.set_title('Sine wave')
def deg2rad(x):
return x * np.pi / 180
def rad2deg(x):
return x * 180 / np.pi
secax = ax.secondary_xaxis('top', functions=(deg2rad, rad2deg))
secax.set_xlabel('angle [rad]')
plt.show()
###########################################################################
# Here is the case of converting from wavenumber to wavelength in a
# log-log scale.
#
# .. note::
#
# In this case, the xscale of the parent is logarithmic, so the child is
# made logarithmic as well.
fig, ax = plt.subplots(constrained_layout=True)
x = np.arange(0.02, 1, 0.02)
np.random.seed(19680801)
y = np.random.randn(len(x)) ** 2
ax.loglog(x, y)
ax.set_xlabel('f [Hz]')
ax.set_ylabel('PSD')
ax.set_title('Random spectrum')
def one_over(x):
"""Vectorized 1/x, treating x==0 manually"""
x = np.array(x).astype(float)
near_zero = np.isclose(x, 0)
x[near_zero] = np.inf
x[~near_zero] = 1 / x[~near_zero]
return x
# the function "1/x" is its own inverse
inverse = one_over
secax = ax.secondary_xaxis('top', functions=(one_over, inverse))
secax.set_xlabel('period [s]')
plt.show()
###########################################################################
# Sometimes we want to relate the axes through a transform that is ad hoc
# and derived empirically from the data. In that case we can set the
# forward and inverse transform functions to be linear interpolations from the
# one data set to the other.
#
# .. note::
#
# In order to properly handle the data margins, the mapping functions
# (``forward`` and ``inverse`` in this example) need to be defined beyond the
# nominal plot limits.
#
# In the specific case of the numpy linear interpolation, `numpy.interp`,
# this condition can be arbitrarily enforced by providing optional kwargs
# *left*, *right* such that values outside the data range are mapped
# well outside the plot limits.
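# For instance (an illustrative sketch, not part of the original example), the
# interpolation below could be written as
#
#     def forward(x):
#         return np.interp(x, xold, xnew, left=-1e6, right=1e6)
#
# so that queries outside the range of ``xold`` map far outside the plot limits.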
fig, ax = plt.subplots(constrained_layout=True)
xdata = np.arange(1, 11, 0.4)
ydata = np.random.randn(len(xdata))
ax.plot(xdata, ydata, label='Plotted data')
xold = np.arange(0, 11, 0.2)
# fake data set relating x coordinate to another data-derived coordinate.
# xnew must be monotonic, so we sort...
xnew = np.sort(10 * np.exp(-xold / 4) + np.random.randn(len(xold)) / 3)
ax.plot(xold[3:], xnew[3:], label='Transform data')
ax.set_xlabel('X [m]')
ax.legend()
def forward(x):
return np.interp(x, xold, xnew)
def inverse(x):
return np.interp(x, xnew, xold)
secax = ax.secondary_xaxis('top', functions=(forward, inverse))
secax.xaxis.set_minor_locator(AutoMinorLocator())
secax.set_xlabel('$X_{other}$')
plt.show()
###########################################################################
# A final example translates np.datetime64 to yearday on the x axis and
# from Celsius to Fahrenheit on the y axis. Note the addition of a
# third y axis, and that it can be placed using a float for the
# location argument.
dates = [datetime.datetime(2018, 1, 1) + datetime.timedelta(hours=k * 6)
for k in range(240)]
temperature = np.random.randn(len(dates)) * 4 + 6.7
fig, ax = plt.subplots(constrained_layout=True)
ax.plot(dates, temperature)
ax.set_ylabel(r'$T\ [^oC]$')
plt.xticks(rotation=70)
def date2yday(x):
"""Convert matplotlib datenum to days since 2018-01-01."""
y = x - mdates.date2num(datetime.datetime(2018, 1, 1))
return y
def yday2date(x):
"""Return a matplotlib datenum for *x* days after 2018-01-01."""
y = x + mdates.date2num(datetime.datetime(2018, 1, 1))
return y
secax_x = ax.secondary_xaxis('top', functions=(date2yday, yday2date))
secax_x.set_xlabel('yday [2018]')
def celsius_to_fahrenheit(x):
return x * 1.8 + 32
def fahrenheit_to_celsius(x):
return (x - 32) / 1.8
secax_y = ax.secondary_yaxis(
'right', functions=(celsius_to_fahrenheit, fahrenheit_to_celsius))
secax_y.set_ylabel(r'$T\ [^oF]$')
def celsius_to_anomaly(x):
return (x - np.mean(temperature))
def anomaly_to_celsius(x):
return (x + np.mean(temperature))
# use of a float for the position:
secax_y2 = ax.secondary_yaxis(
1.2, functions=(celsius_to_anomaly, anomaly_to_celsius))
secax_y2.set_ylabel(r'$T - \overline{T}\ [^oC]$')
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.axes.Axes.secondary_xaxis`
# - `matplotlib.axes.Axes.secondary_yaxis`
```
#### File: examples/subplots_axes_and_figures/subfigures.py
```python
import matplotlib.pyplot as plt
import numpy as np
def example_plot(ax, fontsize=12, hide_labels=False):
pc = ax.pcolormesh(np.random.randn(30, 30), vmin=-2.5, vmax=2.5)
if not hide_labels:
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
return pc
np.random.seed(19680808)
# gridspec inside gridspec
fig = plt.figure(constrained_layout=True, figsize=(10, 4))
subfigs = fig.subfigures(1, 2, wspace=0.07)
axsLeft = subfigs[0].subplots(1, 2, sharey=True)
subfigs[0].set_facecolor('0.75')
for ax in axsLeft:
pc = example_plot(ax)
subfigs[0].suptitle('Left plots', fontsize='x-large')
subfigs[0].colorbar(pc, shrink=0.6, ax=axsLeft, location='bottom')
axsRight = subfigs[1].subplots(3, 1, sharex=True)
for nn, ax in enumerate(axsRight):
pc = example_plot(ax, hide_labels=True)
if nn == 2:
ax.set_xlabel('xlabel')
if nn == 1:
ax.set_ylabel('ylabel')
subfigs[1].set_facecolor('0.85')
subfigs[1].colorbar(pc, shrink=0.6, ax=axsRight)
subfigs[1].suptitle('Right plots', fontsize='x-large')
fig.suptitle('Figure suptitle', fontsize='xx-large')
plt.show()
##############################################################################
# It is possible to mix subplots and subfigures using
# `matplotlib.figure.Figure.add_subfigure`. This requires getting
# the gridspec that the subplots are laid out on.
fig, axs = plt.subplots(2, 3, constrained_layout=True, figsize=(10, 4))
gridspec = axs[0, 0].get_subplotspec().get_gridspec()
# clear the left column for the subfigure:
for a in axs[:, 0]:
a.remove()
# plot data in remaining axes:
for a in axs[:, 1:].flat:
a.plot(np.arange(10))
# make the subfigure in the empty gridspec slots:
subfig = fig.add_subfigure(gridspec[:, 0])
axsLeft = subfig.subplots(1, 2, sharey=True)
subfig.set_facecolor('0.75')
for ax in axsLeft:
pc = example_plot(ax)
subfig.suptitle('Left plots', fontsize='x-large')
subfig.colorbar(pc, shrink=0.6, ax=axsLeft, location='bottom')
fig.suptitle('Figure suptitle', fontsize='xx-large')
plt.show()
##############################################################################
# Subfigures can have different widths and heights. This is exactly the
# same example as the first example, but *width_ratios* has been changed:
fig = plt.figure(constrained_layout=True, figsize=(10, 4))
subfigs = fig.subfigures(1, 2, wspace=0.07, width_ratios=[2, 1])
axsLeft = subfigs[0].subplots(1, 2, sharey=True)
subfigs[0].set_facecolor('0.75')
for ax in axsLeft:
pc = example_plot(ax)
subfigs[0].suptitle('Left plots', fontsize='x-large')
subfigs[0].colorbar(pc, shrink=0.6, ax=axsLeft, location='bottom')
axsRight = subfigs[1].subplots(3, 1, sharex=True)
for nn, ax in enumerate(axsRight):
pc = example_plot(ax, hide_labels=True)
if nn == 2:
ax.set_xlabel('xlabel')
if nn == 1:
ax.set_ylabel('ylabel')
subfigs[1].set_facecolor('0.85')
subfigs[1].colorbar(pc, shrink=0.6, ax=axsRight)
subfigs[1].suptitle('Right plots', fontsize='x-large')
fig.suptitle('Figure suptitle', fontsize='xx-large')
plt.show()
##############################################################################
# Subfigures can also be nested:
fig = plt.figure(constrained_layout=True, figsize=(10, 8))
fig.suptitle('fig')
subfigs = fig.subfigures(1, 2, wspace=0.07)
subfigs[0].set_facecolor('coral')
subfigs[0].suptitle('subfigs[0]')
subfigs[1].set_facecolor('coral')
subfigs[1].suptitle('subfigs[1]')
subfigsnest = subfigs[0].subfigures(2, 1, height_ratios=[1, 1.4])
subfigsnest[0].suptitle('subfigsnest[0]')
subfigsnest[0].set_facecolor('r')
axsnest0 = subfigsnest[0].subplots(1, 2, sharey=True)
for nn, ax in enumerate(axsnest0):
pc = example_plot(ax, hide_labels=True)
subfigsnest[0].colorbar(pc, ax=axsnest0)
subfigsnest[1].suptitle('subfigsnest[1]')
subfigsnest[1].set_facecolor('g')
axsnest1 = subfigsnest[1].subplots(3, 1, sharex=True)
axsRight = subfigs[1].subplots(2, 2)
plt.show()
```
#### File: examples/text_labels_and_annotations/usetex_baseline_test.py
```python
import matplotlib.pyplot as plt
import matplotlib.axes as maxes
plt.rcParams.update({"mathtext.fontset": "cm", "mathtext.rm": "serif"})
@maxes.subplot_class_factory
class LatexPreviewSubplot(maxes.Axes):
"""
A hackish way to simultaneously draw texts with text.latex.preview=True and
text.latex.preview=False in the same figure. It does not work with the ps
backend.
"""
def __init__(self, *args, preview=False, **kwargs):
self.preview = preview
super().__init__(*args, **kwargs)
def draw(self, renderer):
from matplotlib import _api # internal, *do not use*
with _api.suppress_matplotlib_deprecation_warning():
with plt.rc_context({"text.latex.preview": self.preview}):
super().draw(renderer)
def test_window_extent(ax, usetex, preview):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
test_strings = ["lg", r"$\frac{1}{2}\pi$",
r"$p^{3^A}$", r"$p_{3_2}$"]
ax.axvline(0, color="r")
for i, s in enumerate(test_strings):
ax.axhline(i, color="r")
ax.text(0., 3 - i, s,
usetex=usetex,
verticalalignment="baseline",
size=50,
bbox=dict(pad=0, ec="k", fc="none"))
ax.set_xlim(-0.1, 1.1)
ax.set_ylim(-.8, 3.9)
title = f"usetex={usetex}\n"
if usetex:
title += f"preview={preview}"
ax.set_title(title)
fig = plt.figure(figsize=(2 * 3, 6.5))
for i, usetex, preview in [[0, False, False],
[1, True, False],
[2, True, True]]:
ax = LatexPreviewSubplot(fig, 1, 3, i + 1, preview=preview)
fig.add_subplot(ax)
fig.subplots_adjust(top=0.85)
test_window_extent(ax, usetex=usetex, preview=preview)
plt.show()
```
#### File: examples/ticks_and_spines/tick-formatters.py
```python
import matplotlib.pyplot as plt
from matplotlib import ticker
def setup(ax, title):
"""Set up common parameters for the Axes in the example."""
# only show the bottom spine
ax.yaxis.set_major_locator(ticker.NullLocator())
ax.spines.right.set_color('none')
ax.spines.left.set_color('none')
ax.spines.top.set_color('none')
# define tick positions
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.00))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(which='major', width=1.00, length=5)
ax.tick_params(which='minor', width=0.75, length=2.5, labelsize=10)
ax.set_xlim(0, 5)
ax.set_ylim(0, 1)
ax.text(0.0, 0.2, title, transform=ax.transAxes,
fontsize=14, fontname='Monospace', color='tab:blue')
# Tick formatters can be set in one of two ways, either by passing a ``str``
# or function to `~.Axis.set_major_formatter` or `~.Axis.set_minor_formatter`,
# or by creating an instance of one of the various `~.ticker.Formatter` classes
# and providing that to `~.Axis.set_major_formatter` or
# `~.Axis.set_minor_formatter`.
# The first two examples directly pass a ``str`` or function.
fig0, axs0 = plt.subplots(2, 1, figsize=(8, 2))
fig0.suptitle('Simple Formatting')
# A ``str``, using format string function syntax, can be used directly as a
# formatter. The variable ``x`` is the tick value and the variable ``pos`` is
# the tick position. This creates a StrMethodFormatter automatically.
setup(axs0[0], title="'{x} km'")
axs0[0].xaxis.set_major_formatter('{x} km')
# A function can also be used directly as a formatter. The function must take
# two arguments: ``x`` for the tick value and ``pos`` for the tick position,
# and must return a ``str``. This creates a FuncFormatter automatically.
setup(axs0[1], title="lambda x, pos: str(x-5)")
axs0[1].xaxis.set_major_formatter(lambda x, pos: str(x-5))
fig0.tight_layout()
# The remaining examples use Formatter objects.
fig1, axs1 = plt.subplots(7, 1, figsize=(8, 6))
fig1.suptitle('Formatter Object Formatting')
# Null formatter
setup(axs1[0], title="NullFormatter()")
axs1[0].xaxis.set_major_formatter(ticker.NullFormatter())
# StrMethod formatter
setup(axs1[1], title="StrMethodFormatter('{x:.3f}')")
axs1[1].xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.3f}"))
# FuncFormatter can be used as a decorator
@ticker.FuncFormatter
def major_formatter(x, pos):
return f'[{x:.2f}]'
setup(axs1[2], title='FuncFormatter("[{:.2f}]".format)')
axs1[2].xaxis.set_major_formatter(major_formatter)
# Fixed formatter
setup(axs1[3], title="FixedFormatter(['A', 'B', 'C', ...])")
# FixedFormatter should only be used together with FixedLocator.
# Otherwise, one cannot be sure where the labels will end up.
positions = [0, 1, 2, 3, 4, 5]
labels = ['A', 'B', 'C', 'D', 'E', 'F']
axs1[3].xaxis.set_major_locator(ticker.FixedLocator(positions))
axs1[3].xaxis.set_major_formatter(ticker.FixedFormatter(labels))
# Scalar formatter
setup(axs1[4], title="ScalarFormatter()")
axs1[4].xaxis.set_major_formatter(ticker.ScalarFormatter(useMathText=True))
# FormatStr formatter
setup(axs1[5], title="FormatStrFormatter('#%d')")
axs1[5].xaxis.set_major_formatter(ticker.FormatStrFormatter("#%d"))
# Percent formatter
setup(axs1[6], title="PercentFormatter(xmax=5)")
axs1[6].xaxis.set_major_formatter(ticker.PercentFormatter(xmax=5))
fig1.tight_layout()
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.pyplot.subplots`
# - `matplotlib.axes.Axes.text`
# - `matplotlib.axis.Axis.set_major_formatter`
# - `matplotlib.axis.Axis.set_major_locator`
# - `matplotlib.axis.Axis.set_minor_locator`
# - `matplotlib.axis.XAxis.set_ticks_position`
# - `matplotlib.axis.YAxis.set_ticks_position`
# - `matplotlib.ticker.FixedFormatter`
# - `matplotlib.ticker.FixedLocator`
# - `matplotlib.ticker.FormatStrFormatter`
# - `matplotlib.ticker.FuncFormatter`
# - `matplotlib.ticker.MultipleLocator`
# - `matplotlib.ticker.NullFormatter`
# - `matplotlib.ticker.NullLocator`
# - `matplotlib.ticker.PercentFormatter`
# - `matplotlib.ticker.ScalarFormatter`
# - `matplotlib.ticker.StrMethodFormatter`
```
#### File: examples/userdemo/custom_boxstyle01.py
```python
r"""
=================
Custom box styles
=================
This example demonstrates the implementation of a custom `.BoxStyle`.
Custom `.ConnectionStyle`\s and `.ArrowStyle`\s can be similarly defined.
"""
from matplotlib.patches import BoxStyle
from matplotlib.path import Path
import matplotlib.pyplot as plt
###############################################################################
# Custom box styles can be implemented as a function that takes arguments
# specifying both a rectangular box and the amount of "mutation", and
# returns the "mutated" path. The specific signature is the one of
# ``custom_box_style`` below.
#
# Here, we return a new path which adds an "arrow" shape on the left of the
# box.
#
# The custom box style can then be used by passing
# ``bbox=dict(boxstyle=custom_box_style, ...)`` to `.Axes.text`.
def custom_box_style(x0, y0, width, height, mutation_size):
"""
Given the location and size of the box, return the path of the box around
it.
Rotation is automatically taken care of.
Parameters
----------
x0, y0, width, height : float
Box location and size.
mutation_size : float
Mutation reference scale, typically the text font size.
"""
# padding
mypad = 0.3
pad = mutation_size * mypad
# width and height with padding added.
width = width + 2 * pad
height = height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad
x1, y1 = x0 + width, y0 + height
# return the new path
return Path([(x0, y0),
(x1, y0), (x1, y1), (x0, y1),
(x0-pad, (y0+y1)/2), (x0, y0),
(x0, y0)],
closed=True)
fig, ax = plt.subplots(figsize=(3, 3))
ax.text(0.5, 0.5, "Test", size=30, va="center", ha="center", rotation=30,
bbox=dict(boxstyle=custom_box_style, alpha=0.2))
###############################################################################
# Likewise, custom box styles can be implemented as classes that implement
# ``__call__``.
#
# The classes can then be registered into the ``BoxStyle._style_list`` dict,
# which allows specifying the box style as a string,
# ``bbox=dict(boxstyle="registered_name,param=value,...", ...)``.
# Note that this registration relies on internal APIs and is therefore not
# officially supported.
class MyStyle:
"""A simple box."""
def __init__(self, pad=0.3):
"""
The arguments must be floats and have default values.
Parameters
----------
pad : float
amount of padding
"""
self.pad = pad
super().__init__()
def __call__(self, x0, y0, width, height, mutation_size):
"""
Given the location and size of the box, return the path of the box
around it.
Rotation is automatically taken care of.
Parameters
----------
x0, y0, width, height : float
Box location and size.
mutation_size : float
Reference scale for the mutation, typically the text font size.
"""
# padding
pad = mutation_size * self.pad
# width and height with padding added
width = width + 2.*pad
height = height + 2.*pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad
x1, y1 = x0 + width, y0 + height
# return the new path
return Path([(x0, y0),
(x1, y0), (x1, y1), (x0, y1),
(x0-pad, (y0+y1)/2.), (x0, y0),
(x0, y0)],
closed=True)
BoxStyle._style_list["angled"] = MyStyle # Register the custom style.
fig, ax = plt.subplots(figsize=(3, 3))
ax.text(0.5, 0.5, "Test", size=30, va="center", ha="center", rotation=30,
bbox=dict(boxstyle="angled,pad=0.5", alpha=0.2))
del BoxStyle._style_list["angled"] # Unregister it.
plt.show()
```
#### File: examples/widgets/span_selector.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import SpanSelector
# Fixing random state for reproducibility
np.random.seed(19680801)
fig, (ax1, ax2) = plt.subplots(2, figsize=(8, 6))
x = np.arange(0.0, 5.0, 0.01)
y = np.sin(2*np.pi*x) + 0.5*np.random.randn(len(x))
ax1.plot(x, y)
ax1.set_ylim(-2, 2)
ax1.set_title('Press left mouse button and drag '
'to select a region in the top graph')
line2, = ax2.plot([], [])
def onselect(xmin, xmax):
indmin, indmax = np.searchsorted(x, (xmin, xmax))
indmax = min(len(x) - 1, indmax)
region_x = x[indmin:indmax]
region_y = y[indmin:indmax]
if len(region_x) >= 2:
line2.set_data(region_x, region_y)
ax2.set_xlim(region_x[0], region_x[-1])
ax2.set_ylim(region_y.min(), region_y.max())
fig.canvas.draw()
#############################################################################
# .. note::
#
# If the SpanSelector object is garbage collected you will lose the
# interactivity. You must keep a hard reference to it to prevent this.
#
span = SpanSelector(ax1, onselect, 'horizontal', useblit=True,
rectprops=dict(alpha=0.5, facecolor='tab:blue'))
# Set useblit=True on most backends for enhanced performance.
plt.show()
#############################################################################
#
# .. admonition:: References
#
# The use of the following functions, methods, classes and modules is shown
# in this example:
#
# - `matplotlib.widgets.SpanSelector`
```
#### File: tutorials/advanced/blitting.py
```python
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 2 * np.pi, 100)
fig, ax = plt.subplots()
# animated=True tells matplotlib to only draw the artist when we
# explicitly request it
(ln,) = ax.plot(x, np.sin(x), animated=True)
# make sure the window is raised, but the script keeps going
plt.show(block=False)
# stop to admire our empty window axes and ensure it is rendered at
# least once.
#
# We need to fully draw the figure at its final size on the screen
# before we continue on so that:
# a) we have the correctly sized and drawn background to grab
# b) we have a cached renderer so that ``ax.draw_artist`` works
# so we spin the event loop to let the backend process any pending operations
plt.pause(0.1)
# get copy of entire figure (everything inside fig.bbox) sans animated artist
bg = fig.canvas.copy_from_bbox(fig.bbox)
# draw the animated artist, this uses a cached renderer
ax.draw_artist(ln)
# show the result to the screen, this pushes the updated RGBA buffer from the
# renderer to the GUI framework so you can see it
fig.canvas.blit(fig.bbox)
for j in range(100):
# reset the background back in the canvas state, screen unchanged
fig.canvas.restore_region(bg)
# update the artist, neither the canvas state nor the screen have changed
ln.set_ydata(np.sin(x + (j / 100) * np.pi))
# re-render the artist, updating the canvas state, but not the screen
ax.draw_artist(ln)
# copy the image to the GUI state, but screen might not be changed yet
fig.canvas.blit(fig.bbox)
# flush any pending GUI events, re-painting the screen if needed
fig.canvas.flush_events()
# you can put a pause in if you want to slow things down
# plt.pause(.1)
###############################################################################
# This example works and shows a simple animation, however because we
# are only grabbing the background once, if the size of the figure in
# pixels changes (due to either the size or dpi of the figure
# changing), the background will be invalid and result in incorrect
# (but sometimes cool looking!) images. There is also a global
# variable and a fair amount of boilerplate which suggests we should
# wrap this in a class.
#
# Class-based example
# -------------------
#
# We can use a class to encapsulate the boilerplate logic and state of
# restoring the background, drawing the artists, and then blitting the
# result to the screen. Additionally, we can use the ``'draw_event'``
# callback to capture a new background whenever a full re-draw
# happens to handle resizes correctly.
class BlitManager:
def __init__(self, canvas, animated_artists=()):
"""
Parameters
----------
canvas : FigureCanvasAgg
The canvas to work with, this only works for sub-classes of the Agg
canvas which have the `~FigureCanvasAgg.copy_from_bbox` and
`~FigureCanvasAgg.restore_region` methods.
animated_artists : Iterable[Artist]
List of the artists to manage
"""
self.canvas = canvas
self._bg = None
self._artists = []
for a in animated_artists:
self.add_artist(a)
# grab the background on every draw
self.cid = canvas.mpl_connect("draw_event", self.on_draw)
def on_draw(self, event):
"""Callback to register with 'draw_event'."""
cv = self.canvas
if event is not None:
if event.canvas != cv:
raise RuntimeError
self._bg = cv.copy_from_bbox(cv.figure.bbox)
self._draw_animated()
def add_artist(self, art):
"""
Add an artist to be managed.
Parameters
----------
art : Artist
The artist to be added. Will be set to 'animated' (just
to be safe). *art* must be in the figure associated with
the canvas this class is managing.
"""
if art.figure != self.canvas.figure:
raise RuntimeError
art.set_animated(True)
self._artists.append(art)
def _draw_animated(self):
"""Draw all of the animated artists."""
fig = self.canvas.figure
for a in self._artists:
fig.draw_artist(a)
def update(self):
"""Update the screen with animated artists."""
cv = self.canvas
fig = cv.figure
# paranoia in case we missed the draw event,
if self._bg is None:
self.on_draw(None)
else:
# restore the background
cv.restore_region(self._bg)
# draw all of the animated artists
self._draw_animated()
# update the GUI state
cv.blit(fig.bbox)
# let the GUI event loop process anything it has to do
cv.flush_events()
###############################################################################
# Here is how we would use our class. This is a slightly more complicated
# example than the first case as we add a text frame counter as well.
# make a new figure
fig, ax = plt.subplots()
# add a line
(ln,) = ax.plot(x, np.sin(x), animated=True)
# add a frame number
fr_number = ax.annotate(
"0",
(0, 1),
xycoords="axes fraction",
xytext=(10, -10),
textcoords="offset points",
ha="left",
va="top",
animated=True,
)
bm = BlitManager(fig.canvas, [ln, fr_number])
# make sure our window is on the screen and drawn
plt.show(block=False)
plt.pause(.1)
for j in range(100):
# update the artists
ln.set_ydata(np.sin(x + (j / 100) * np.pi))
fr_number.set_text("frame: {j}".format(j=j))
# tell the blitting manager to do its thing
bm.update()
###############################################################################
# This class does not depend on `.pyplot` and is suitable to embed
# into larger GUI applications.
``` |
{
"source": "JohnLauFoo/PhaseControl2022_Yu",
"score": 2
} |
#### File: PhaseControl2022_Yu/Closedloop_control/detector.py
```python
from collections import deque
import numpy as np
class Detector:
def __init__(self, target_channel, trigger_dio, num_to_wait, regr_buffer_size, fltr_buffer_size, target_lowcut,
target_highcut, slope):
self.target_channel = target_channel
self.trigger_dio = trigger_dio
self.num_to_wait = num_to_wait
self.regr_buffer_size = regr_buffer_size
self.fltr_buffer_size = fltr_buffer_size
self.target_lowcut = target_lowcut
self.target_highcut = target_highcut
self.trigger = False
self.data_buffer = deque([], maxlen=fltr_buffer_size)
self.sign_buffer = None
self.curr_sign = None
self.slope = slope # some default slope known empirically
self.sample_count = None
# TESTED
def update_slope(self):
if self.sample_count is None: # the very first critical point
# initialize sample count according to current phase (0 or pi)
self.sample_count = self.curr_sign*int(np.pi/self.slope)
return
self.slope = (2-self.curr_sign)*np.pi/self.sample_count
# map 1 (positive) to pi, 0 (negative) to 2pi
self.sample_count = self.curr_sign*int(np.pi/self.slope)
# only reset sample count to 0 at phase = 2pi
# TESTED
def flip_curr_sign(self):
self.curr_sign = not self.curr_sign
# TESTED
def check_sign_buffer(self):
# needs optimization
rtn = True
for sign in self.sign_buffer:
if self.curr_sign == sign:
rtn = False
return rtn
# TESTED
def initialize_sign_buffer(self):
self.sign_buffer = deque([self.curr_sign]*self.num_to_wait, maxlen=self.num_to_wait)
```
#### File: PhaseControl2022_Yu/Closedloop_control/phase_detection.py
```python
""""""
from scipy.signal import sosfiltfilt
import numpy as np
# TESTED
def generate_matrix(regr_buffer_size):
sampling_axis = np.arange(regr_buffer_size)
A = np.vstack([sampling_axis, np.ones(len(sampling_axis))]).T
return A
# TESTED
def calculate_derv(A, filter, Detector):
curr_filtered = sosfiltfilt(filter, Detector.data_buffer)
curr_regr = curr_filtered[len(curr_filtered) - Detector.regr_buffer_size:, np.newaxis]
pinv = np.linalg.pinv(A)
alpha = pinv.dot(curr_regr)
return alpha[0][0]
# TESTED
def update_signbuffer(A, filter, Detector):
curr_derv = calculate_derv(A, filter, Detector)
Detector.sign_buffer.append(curr_derv > 0)
try:
Detector.sample_count += 1
except TypeError:
pass
``` |
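The `Detector` class and the `phase_detection` helpers above are meant to be used together: the detector holds the rolling sample buffer and the slope-sign state, while the helpers band-pass filter the buffer and regress a local slope on every new sample. Below is a minimal sketch of one way they could be wired into an acquisition loop; the sample source, sampling rate, and filter design are illustrative assumptions, not part of the original repository.
```python
# Illustrative wiring only: read_sample(), fs and the Butterworth design are assumptions.
import numpy as np
from scipy.signal import butter
from Closedloop_control.detector import Detector
from Closedloop_control.phase_detection import (
    generate_matrix, calculate_derv, update_signbuffer)

def read_sample():
    """Hypothetical stand-in for one new sample from the target channel."""
    return np.random.randn()

fs = 1000  # assumed sampling rate (Hz)
det = Detector(target_channel=0, trigger_dio=1, num_to_wait=3,
               regr_buffer_size=20, fltr_buffer_size=200,
               target_lowcut=6.0, target_highcut=10.0, slope=0.05)
# second-order-sections band-pass filter over the detector's target band
sos = butter(4, [det.target_lowcut, det.target_highcut],
             btype="bandpass", fs=fs, output="sos")
A = generate_matrix(det.regr_buffer_size)

# pre-fill the data buffer so the first regression has enough samples
for _ in range(det.fltr_buffer_size):
    det.data_buffer.append(read_sample())
det.curr_sign = calculate_derv(A, sos, det) > 0
det.initialize_sign_buffer()

for _ in range(10 * fs):                 # process roughly 10 s of samples
    det.data_buffer.append(read_sample())
    update_signbuffer(A, sos, det)
    if det.check_sign_buffer():          # the slope sign has flipped persistently
        det.flip_curr_sign()
        det.update_slope()               # re-estimate samples per half-cycle
```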
{
"source": "johnlawsharrison/pyacoustid",
"score": 3
} |
#### File: johnlawsharrison/pyacoustid/fpcalc.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import argparse
import sys
import acoustid
import chromaprint
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-length', metavar='SECS', type=int, default=120,
help='length of the audio data used for fingerprint '
'calculation (default 120)')
parser.add_argument('-raw', action='store_true',
help='output the raw uncompressed fingerprint')
parser.add_argument('paths', metavar='FILE', nargs='+',
help='audio file to be fingerprinted')
args = parser.parse_args()
# make gst not try to parse the args
del sys.argv[1:]
first = True
for i, path in enumerate(args.paths):
try:
duration, fp = acoustid.fingerprint_file(path, args.length)
except Exception:
print("ERROR: unable to calculate fingerprint "
"for file %s, skipping" % path, file=sys.stderr)
continue
if args.raw:
raw_fp = chromaprint.decode_fingerprint(fp)[0]
fp = ','.join(map(str, raw_fp))
if not first:
            print()
first = False
print('FILE=%s' % path)
print('DURATION=%d' % duration)
print('FINGERPRINT=%s' % fp.decode('utf8'))
if __name__ == '__main__':
main()
``` |
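For reference, the command-line tool above is a thin wrapper around two library calls; a small sketch of using them directly is shown below (the audio file name is a placeholder).
```python
# Fingerprint one file and decode the compressed fingerprint into raw integer terms.
import acoustid
import chromaprint

duration, fp = acoustid.fingerprint_file("example.mp3", maxlength=120)
raw_fp, algorithm = chromaprint.decode_fingerprint(fp)
print("duration=%d s, algorithm=%d, first terms=%s" % (duration, algorithm, raw_fp[:5]))
```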
{
"source": "JohnLCaron/electionguard-python",
"score": 2
} |
#### File: src/electionguard/decryption_share.py
```python
from dataclasses import dataclass, field
from typing import Dict, Optional, Tuple, Union
from .chaum_pedersen import ChaumPedersenProof
from .election_object_base import ElectionObjectBase
from .elgamal import ElGamalCiphertext
from .group import ElementModP, ElementModQ
from .logs import log_warning
from .types import BALLOT_ID, CONTEST_ID, GUARDIAN_ID, SELECTION_ID
ELECTION_PUBLIC_KEY = ElementModP
@dataclass
class CiphertextCompensatedDecryptionSelection(ElectionObjectBase):
"""
A compensated fragment of a Guardian's Partial Decryption of a selection generated by an available guardian
"""
guardian_id: GUARDIAN_ID
"""
The Available Guardian that this share belongs to
"""
missing_guardian_id: GUARDIAN_ID
"""
The Missing Guardian for whom this share is calculated on behalf of
"""
description_hash: ElementModQ
"""
The SelectionDescription hash
"""
share: ElementModP
"""
The Share of the decryption of a selection. `M_{i,l} in the spec`
"""
recovery_key: ElementModP
"""
The Recovery Public Key for the missing_guardian that corresponds to the available guardian's share of the secret
"""
proof: ChaumPedersenProof
"""
The Proof that the share was decrypted correctly
"""
ProofOrRecovery = Union[
ChaumPedersenProof, Dict[GUARDIAN_ID, CiphertextCompensatedDecryptionSelection]
]
@dataclass
class CiphertextDecryptionSelection(ElectionObjectBase):
"""
A Guardian's Partial Decryption of a selection. A CiphertextDecryptionSelection
    can be generated by a guardian directly, or it can be compensated for by a quorum of guardians
When the guardian generates this share directly, the `proof` field is populated with
a `chaumPedersen` proof that the decryption share was generated correctly.
When the share is generated on behalf of this guardian by other guardians, the `recovered_parts`
collection is populated with the `CiphertextCompensatedDecryptionSelection` objects generated
by each available guardian.
"""
guardian_id: GUARDIAN_ID
"""
The Available Guardian that this share belongs to
"""
description_hash: ElementModQ
"""
The SelectionDescription hash
"""
share: ElementModP
"""
The Share of the decryption of a selection. `M_i` in the spec
"""
proof: Optional[ChaumPedersenProof] = field(init=True, default=None)
"""
The Proof that the share was decrypted correctly, if the guardian
was available for decryption
"""
recovered_parts: Optional[
Dict[GUARDIAN_ID, CiphertextCompensatedDecryptionSelection]
] = field(init=True, default=None)
"""
the recovered parts of the decryption provided by available guardians,
if the guardian was missing from decryption
"""
def is_valid(
self,
message: ElGamalCiphertext,
election_public_key: ELECTION_PUBLIC_KEY,
extended_base_hash: ElementModQ,
) -> bool:
"""
Verify that this CiphertextDecryptionSelection is valid for a
specific ElGamal key pair, public key, and election context.
:param message: the `ElGamalCiphertext` to compare
        :param election_public_key: the `ElementModP` Election Public Key for the Guardian
:param extended_base_hash: The `ElementModQ` election extended base hash.
"""
# verify we have a proof or recovered parts
if self.proof is None and self.recovered_parts is None:
log_warning(
f"CiphertextDecryptionSelection is_valid failed for guardian: {self.guardian_id} selection: {self.object_id} with missing data"
)
return False
if self.proof is not None and self.recovered_parts is not None:
log_warning(
f"CiphertextDecryptionSelection is_valid failed for guardian: {self.guardian_id} selection: {self.object_id} cannot have proof and recovery"
)
return False
if self.proof is not None and not self.proof.is_valid(
message,
election_public_key,
self.share,
extended_base_hash,
):
log_warning(
f"CiphertextDecryptionSelection is_valid failed for guardian: {self.guardian_id} selection: {self.object_id} with invalid proof"
)
return False
if self.recovered_parts is not None:
for (
compensating_guardian_id,
part,
) in self.recovered_parts.items():
if not part.proof.is_valid(
message,
part.recovery_key,
part.share,
extended_base_hash,
):
log_warning(
f"CiphertextDecryptionSelection is_valid failed for guardian: {self.guardian_id} selection: {self.object_id} with invalid partial proof"
)
return False
return True
def create_ciphertext_decryption_selection(
object_id: str,
guardian_id: GUARDIAN_ID,
description_hash: ElementModQ,
share: ElementModP,
proof_or_recovery: ProofOrRecovery,
) -> CiphertextDecryptionSelection:
"""
Create a ciphertext decryption selection
:param object_id: Object id
:param guardian_id: Guardian id
:param description_hash: Description hash
:param share: Share
:param proof_or_recovery: Proof or recovery
"""
if isinstance(proof_or_recovery, ChaumPedersenProof):
return CiphertextDecryptionSelection(
object_id, guardian_id, description_hash, share, proof=proof_or_recovery
)
elif isinstance(proof_or_recovery, Dict):
return CiphertextDecryptionSelection(
object_id,
guardian_id,
description_hash,
share,
recovered_parts=proof_or_recovery,
)
else:
log_warning(f"decryption share cannot assign {proof_or_recovery}")
return CiphertextDecryptionSelection(
object_id,
guardian_id,
description_hash,
share,
)
@dataclass
class CiphertextDecryptionContest(ElectionObjectBase):
"""
A Guardian's Partial Decryption of a contest
"""
guardian_id: GUARDIAN_ID
"""
The Available Guardian that this share belongs to
"""
description_hash: ElementModQ
"""
The ContestDescription Hash
"""
selections: Dict[SELECTION_ID, CiphertextDecryptionSelection]
"""
the collection of decryption shares for this contest's selections
"""
@dataclass
class CiphertextCompensatedDecryptionContest(ElectionObjectBase):
"""
A Guardian's Partial Decryption of a contest
"""
guardian_id: GUARDIAN_ID
"""
The Available Guardian that this share belongs to
"""
missing_guardian_id: GUARDIAN_ID
"""
The Missing Guardian for whom this share is calculated on behalf of
"""
description_hash: ElementModQ
"""
The ContestDescription Hash
"""
selections: Dict[SELECTION_ID, CiphertextCompensatedDecryptionSelection]
"""
the collection of decryption shares for this contest's selections
"""
@dataclass
class BallotDecryptionShare:
"""
A Guardian's Partial Decryption Share of a specific ballot (e.g. of a spoiled ballot)
"""
guardian_id: GUARDIAN_ID
"""
The Available Guardian that this share belongs to
"""
public_key: ElementModP
"""
The election public key for the guardian
"""
ballot_id: BALLOT_ID
"""
The Ballot Id that this Decryption Share belongs to
"""
contests: Dict[CONTEST_ID, CiphertextDecryptionContest]
"""
The collection of all contests in the ballot
"""
@dataclass
class CompensatedBallotDecryptionShare:
"""
A Compensated Partial Decryption Share generated by
an available guardian on behalf of a missing guardian
"""
guardian_id: GUARDIAN_ID
"""
The Available Guardian that this share belongs to
"""
missing_guardian_id: GUARDIAN_ID
"""
The Missing Guardian for whom this share is calculated on behalf of
"""
public_key: ElementModP
"""
The election public key for the guardian
"""
ballot_id: BALLOT_ID
"""
The Ballot Id that this Decryption Share belongs to
"""
contests: Dict[CONTEST_ID, CiphertextCompensatedDecryptionContest]
"""
The collection of all contests in the ballot
"""
@dataclass
class TallyDecryptionShare:
"""
A Guardian's Partial Decryption Share of an election tally
"""
guardian_id: GUARDIAN_ID
"""
The Available Guardian that this share belongs to
"""
public_key: ElementModP
"""
The election public key for the guardian
"""
contests: Dict[CONTEST_ID, CiphertextDecryptionContest]
"""
The collection of decryption shares for all contests in the election
"""
spoiled_ballots: Dict[BALLOT_ID, BallotDecryptionShare]
"""
The collection of decryption shares for all spoiled ballots in the election
"""
@dataclass
class CompensatedTallyDecryptionShare:
"""
A Compensated Partial Decryption Share generated by
an available guardian on behalf of a missing guardian
"""
guardian_id: GUARDIAN_ID
"""
The Available Guardian that this share belongs to
"""
missing_guardian_id: GUARDIAN_ID
"""
The Missing Guardian for whom this share is calculated on behalf of
"""
public_key: ElementModP
"""
The election public key for the guardian
"""
contests: Dict[CONTEST_ID, CiphertextCompensatedDecryptionContest]
"""
The collection of decryption shares for all contests in the election
"""
spoiled_ballots: Dict[BALLOT_ID, CompensatedBallotDecryptionShare]
"""
The collection of decryption shares for all spoiled ballots in the election
"""
def get_tally_shares_for_selection(
selection_id: str,
shares: Dict[GUARDIAN_ID, TallyDecryptionShare],
) -> Dict[GUARDIAN_ID, Tuple[ELECTION_PUBLIC_KEY, CiphertextDecryptionSelection]]:
"""
Get all of the cast shares for a specific selection
"""
cast_shares: Dict[
GUARDIAN_ID, Tuple[ELECTION_PUBLIC_KEY, CiphertextDecryptionSelection]
] = {}
for share in shares.values():
for contest in share.contests.values():
for selection in contest.selections.values():
if selection.object_id == selection_id:
cast_shares[share.guardian_id] = (share.public_key, selection)
return cast_shares
def get_spoiled_shares_for_selection(
ballot_id: str,
selection_id: str,
shares: Dict[GUARDIAN_ID, TallyDecryptionShare],
) -> Dict[GUARDIAN_ID, Tuple[ELECTION_PUBLIC_KEY, CiphertextDecryptionSelection]]:
"""
Get the spoiled shares for a given selection
"""
spoiled_shares: Dict[
GUARDIAN_ID, Tuple[ELECTION_PUBLIC_KEY, CiphertextDecryptionSelection]
] = {}
for share in shares.values():
for ballot in share.spoiled_ballots.values():
if ballot.ballot_id == ballot_id:
for contest in ballot.contests.values():
for selection in contest.selections.values():
if selection.object_id == selection_id:
spoiled_shares[share.guardian_id] = (
share.public_key,
selection,
)
return spoiled_shares
def get_ballot_shares_for_selection(
selection_id: str,
shares: Dict[GUARDIAN_ID, BallotDecryptionShare],
) -> Dict[GUARDIAN_ID, Tuple[ELECTION_PUBLIC_KEY, CiphertextDecryptionSelection]]:
"""
Get the ballot shares for a given selection, in the context of a specific ballot
"""
ballot_shares: Dict[
GUARDIAN_ID, Tuple[ELECTION_PUBLIC_KEY, CiphertextDecryptionSelection]
] = {}
for ballot_share in shares.values():
for contest_share in ballot_share.contests.values():
for selection_share in contest_share.selections.values():
if selection_share.object_id == selection_id:
ballot_shares[ballot_share.guardian_id] = (
ballot_share.public_key,
selection_share,
)
return ballot_shares
```
#### File: tests/property/test_hash.py
```python
import unittest
from hypothesis import given
from electionguard.group import ElementModQ
from electionguard.hash import hash_elems
from tests.property.test_group import elements_mod_p, elements_mod_q
class TestHash(unittest.TestCase):
@given(elements_mod_p(), elements_mod_q())
def test_same_answer_twice_in_a_row(self, a: ElementModQ, b: ElementModQ):
# if this doesn't work, then our hash function isn't a function
h1 = hash_elems(a, b)
h2 = hash_elems(a, b)
self.assertEqual(h1, h2)
@given(elements_mod_q(), elements_mod_q())
def test_basic_hash_properties(self, a: ElementModQ, b: ElementModQ):
ha = hash_elems(a)
hb = hash_elems(b)
if a == b:
self.assertEqual(ha, hb)
if ha != hb:
self.assertNotEqual(a, b)
``` |
{
"source": "johnlcd/AppServer",
"score": 2
} |
#### File: app/blueprints/exception.py
```python
from sanic import Blueprint
from ..utils import JsonResult
from ..exceptions import BadRequest
from sanic.exceptions import NotFound, ServerError, FileNotFound, RequestTimeout, PayloadTooLarge, InvalidUsage
exception_blueprint = Blueprint('exception')
@exception_blueprint.exception(NotFound)
def not_found(request, exception):
return JsonResult.not_ok('not found').response_json()
@exception_blueprint.exception(ServerError)
def server_error(request, exception):
return JsonResult.not_ok('server error').response_json()
@exception_blueprint.exception(FileNotFound)
def file_not_found(request, exception):
return JsonResult.not_ok('file not found').response_json()
@exception_blueprint.exception(RequestTimeout)
def request_timeout(request, exception):
return JsonResult.not_ok('request timeout').response_json()
@exception_blueprint.exception(InvalidUsage)
def request_invalid_usage(request, exception):
return JsonResult.not_ok('invalid usage').response_json()
@exception_blueprint.exception(PayloadTooLarge)
def request_payload_too_large(request, exception):
    return JsonResult.not_ok('payload too large').response_json()
@exception_blueprint.exception(BadRequest)
def bad_request(request, exception):
return JsonResult.not_ok(exception.args[0] or 'bad request').response_json()
@exception_blueprint.exception(KeyError)
def key_error(request, exception):
return JsonResult.not_ok('bad request').response_json()
```
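A sketch of how this blueprint might be registered on the application follows; the package path, application name, host, and port are assumptions.
```python
# Assumed wiring: attach the exception handlers to the Sanic app.
from sanic import Sanic
from app.blueprints.exception import exception_blueprint  # assumed package path

app = Sanic('AppServer')
app.blueprint(exception_blueprint)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000)
```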
#### File: app/db/__init__.py
```python
from ..config import Config
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Create the database engine
__engine = create_engine(Config.db_url, echo=Config.debug, logging_name=Config.log_name)
# Configure the database
Base = declarative_base()
# __metadata = __Base.metadata
# Configure the tables
from .models import *
# Configure the database connection
Session = sessionmaker(__engine)
def init():
    # Create the tables
Base.metadata.create_all(__engine)
```
#### File: app/utils/json_result.py
```python
from .alchemy_json_encoder import AlchemyEncoder
from sanic.response import json
class JsonResult:
@classmethod
def ok(cls, datas=None):
return JsonResult(True, 0, '', cls.__encode_datas(datas))
@classmethod
def not_ok(cls, msg, code=-1, datas=None):
return JsonResult(False, code, msg, cls.__encode_datas(datas))
@staticmethod
def __encode_datas(datas):
if isinstance(datas, list):
return [AlchemyEncoder.decode(data) for data in datas]
else:
return AlchemyEncoder.decode(datas)
def __init__(self, ok, code, msg, datas):
if ok is not None:
self.ok = ok
if code is not None:
self.code = code
if msg is not None:
self.msg = msg
if datas is not None:
self.datas = datas
def response_json(self, **kwargs):
return json(self, **kwargs)
``` |
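For context, here is a sketch of how a route handler could return a `JsonResult`, mirroring the pattern used in the exception blueprint above; the blueprint name, route, and import path are hypothetical.
```python
# Hypothetical route: wrap a success payload in the unified JSON envelope.
from sanic import Blueprint
from app.utils import JsonResult  # assumed package path

ping_blueprint = Blueprint('ping')

@ping_blueprint.route('/ping')
async def ping(request):
    # datas may also carry SQLAlchemy model instances, which __encode_datas
    # passes through AlchemyEncoder before serialization
    return JsonResult.ok().response_json()
```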
{
"source": "johnlee175/LogcatFileReader",
"score": 3
} |
#### File: LogcatFileReader/examples/fetch_merge_show_log_x.py
```python
import re
import os
import sys
import simple_utils
_VERSION = '1.0.0'
_BASE_URL = 'http://www.yourwebsite.com:8080/logs/'
# change this method for your custom
def fetch_urls(uid, date):
result_urls = []
href_pattern = re.compile(r'.*?<a\shref="(logcat\.dump\.(\d\d\d\d\d\d\d\d\d\d\d\d\d\d)\.log.*?)">logcat\.dump\.\d+\.log.+')
req_url = _BASE_URL + uid
for line in simple_utils.fetch_url_with_line(req_url):
match = href_pattern.match(line)
if match:
if int(match.group(2)) >= int(date):
result_urls.append(req_url + '/' + match.group(1))
return result_urls
def unpack(logs_folder):
print 'unzip all log files ...'
for parent, dir_names, file_names in os.walk(logs_folder):
for file_name in file_names:
full_path = os.path.join(parent, file_name)
file_base, file_ext = os.path.splitext(full_path)
if file_ext == '.zip':
if os.path.exists(file_base):
os.remove(file_base)
simple_utils.unzip(full_path, logs_folder, True)
# logcat.dump.20160503082219.log
pattern = re.compile(r'^logcat\.dump\.(\d\d\d\d\d\d\d\d\d\d\d\d\d\d)\.log$')
def compare_file_index(a, b):
a_num = int(pattern.match(a).group(1))
b_num = int(pattern.match(b).group(1))
if a_num > b_num:
return 1
elif a_num < b_num:
return -1
else:
return 0
def show_log(log_path):
cmd = 'java -jar LogcatFileReader-1.0.0.jar ' + log_path + ' threadtime'
print 'exec ' + cmd
os.system(cmd)
def parse_opt(argv):
import optparse
usage = 'Usage: %prog [[options] [value]]'
desc = 'Example: %prog -u 2060900675 -d 20160420153000'
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option('-v', dest='version', action='store_true', help='show the current version')
parser.add_option('-u', dest='user_id', type='string', help="special the device user id",
default=' ', metavar='USER_ID')
parser.add_option('-d', dest='date', type='string', help='which day of log do you want to see',
default=simple_utils.get_curr_date_str(), metavar='DATE_STRING')
options, categories = parser.parse_args(argv[1:])
return options.version, options.user_id, options.date
def main(argv):
logs_folder = 'logs'
version, user_id, date = parse_opt(argv)
if version:
print 'The Current Version: ' + _VERSION
return
if not user_id or len(user_id.strip()) == 0:
print 'User id required, please with -u {USER_ID}'
return
if not simple_utils.is_valid_date_str(date):
print 'The date string provided by -d {DATE_STRING} is invalid, accurate to seconds, like 20160101053000'
return
simple_utils.remove_dir(logs_folder)
url_list = fetch_urls(user_id, date)
    if not url_list:
        print 'No new log files were matched from the remote site'
        return
simple_utils.download(url_list, logs_folder, [url[(url.rindex('/') + 1):] for url in url_list])
unpack(logs_folder)
show_log(simple_utils.merge_files(logs_folder, pattern, compare_file_index))
if __name__ == '__main__':
main(sys.argv)
```
#### File: LogcatFileReader/examples/simple_utils.py
```python
import urllib
import urllib2
import re
import os
import sys
import time
# upload('http://www.mywebsite.com:8080/upload.php', {}, 'file', os.path.join('/home/john/', 'a.txt'))
def upload(http_url, form_params, file_item_name, file_path):
boundary = '-----------------%s' % hex(int(time.time() * 1000))
crlf = '\r\n'
separator = '--%s' % boundary
file_type = 'application/octet-stream'
data = []
for key in form_params.keys():
value = form_params[key]
data.append(separator)
data.append('Content-Disposition: form-data; name="%s"%s' % (key, crlf))
data.append(value)
data.append(separator)
data.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (file_item_name, os.path.basename(file_path)))
data.append('Content-Type: %s%s' % (file_type, crlf))
file_res = open(file_path)
data.append(file_res.read())
file_res.close()
data.append('%s--%s' % (separator, crlf))
http_body = crlf.join(data)
req = urllib2.Request(http_url, data=http_body)
req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)
req.add_header('Connection', 'Keep-Alive')
resp = urllib2.urlopen(req, timeout=30)
print resp.read()
# unzip('/home/john/a.zip', '/home/john/', True)
def unzip(zip_path, extract_dir, delete_zip_on_extracted):
import zipfile
# comment following code is because of the unix file permissions lost
# zip_files = zipfile.ZipFile(zip_path, 'r')
# zip_files.extractall(extract_dir)
# zip_files.close()
if not zipfile.is_zipfile(zip_path):
print "%s is not a zip file" % zip_path
exit(0)
z = zipfile.ZipFile(zip_path)
try:
for info in z.infolist():
name = info.filename
if '..' in name:
continue
if name.startswith('/'):
name = name[1:]
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
if name.endswith('/'): # directory
dirname = os.path.dirname(target)
if not os.path.isdir(dirname):
os.makedirs(dirname)
else: # file
dirname = os.path.dirname(target)
if not os.path.isdir(dirname):
os.makedirs(dirname)
data = z.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
unix_attributes = info.external_attr >> 16
if unix_attributes:
os.chmod(target, unix_attributes)
finally:
z.close()
if delete_zip_on_extracted:
os.remove(zip_path)
# 20161201120909
def get_curr_date_str():
return time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
# 20161201120909
def is_valid_date_str(date_str):
try:
time.strptime(date_str, '%Y%m%d%H%M%S')
return True
except ValueError, e:
print e
return False
def remove_dir(top_dir):
if os.path.exists(top_dir):
for root, dirs, files in os.walk(top_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(top_dir)
def delete_file(src):
if os.path.isfile(src):
os.remove(src)
elif os.path.isdir(src):
for item in os.listdir(src):
delete_file(os.path.join(src, item))
os.rmdir(src)
# # logcat.dump.20160503082219.log
# pattern = re.compile(r'^logcat\.dump\.(\d\d\d\d\d\d\d\d\d\d\d\d\d\d)\.log$')
# def compare_file_index(a, b):
# a_num = int(pattern.match(a).group(1))
# b_num = int(pattern.match(b).group(1))
# if a_num > b_num:
# return 1
# elif a_num < b_num:
# return -1
# else:
# return 0
# merge_files('./logs/', pattern, compare_file_index)
def merge_files(folder, pattern, compare_file_index):
print 'merge all files ...'
file_list = []
for parent, dir_names, file_names in os.walk(folder):
for file_name in file_names:
if pattern.match(file_name):
file_list.append(file_name)
file_list.sort(cmp=compare_file_index)
output_path = os.path.join(folder, file_list[0])
output_fd = open(output_path, mode='a')
for log_file in file_list[1:]:
log_path = os.path.join(folder, log_file)
input_fd = open(log_path)
data = input_fd.read()
output_fd.write(data)
output_fd.flush()
input_fd.close()
del data
os.remove(log_path)
output_fd.close()
return output_path
def fetch_url_with_line(req_url):
request = urllib2.Request(req_url)
resp = urllib2.urlopen(request, timeout=30)
return resp.read().splitlines()
# download(['http://www.mywebsite.com:8080/download.php?file=a.zip'], './zips/', ['a.zip'])
def download(urls, folder, file_names):
if not os.path.exists(folder):
os.makedirs(folder)
for idx, url in enumerate(urls):
print 'downloading ' + url
file_path = os.path.join(folder, file_names[idx])
urllib.urlretrieve(url, file_path)
# def flat_map_each_file(file_name, file_path, file_ext):
# print 'file path is ' + file_path + ", including file name: " \
# + file_name + ", " + file_ext + " is filename extension"
# iter_files('/home/john/logs/', flat_map_each_file)
def iter_files(top_folder, flat_map_each_file):
for parent, dir_names, file_names in os.walk(top_folder):
for file_name in file_names:
file_path = os.path.join(parent, file_name)
file_base, file_ext = os.path.splitext(file_path)
flat_map_each_file(file_name, file_path, file_ext)
def platform_name():
if sys.platform == 'darwin':
return 'macosx'
elif sys.platform == 'linux2':
return 'linux'
elif sys.platform.find('win') >= 0:
return 'windows'
else:
return ''
def binary(name):
if os.name == 'posix':
return './' + name
elif os.name == 'nt':
return name + '.exe'
else:
return name
``` |
{
"source": "JohnleeHIT/SEX-Net",
"score": 2
} |
#### File: src/data/datagenerator.py
```python
import h5py
import torch
import torch.utils.data as data
import glob
import os
import numpy as np
from skimage.transform import resize
class CTData(data.Dataset):
def __init__(self, root_path, augmentation = None):
self.file_list = [x for x in glob.glob(os.path.join(root_path, "*.h5"))]
self.augmentation = augmentation
def __getitem__(self, index):
# read h5 files
h5file = h5py.File(self.file_list[index])
self.data = h5file.get("data")
self.label = h5file.get("label")
self.subject = h5file.get("subject")
self.slice = h5file.get("slice")
self.data1 = self.data.value
self.label1 = self.label.value
self.subject1 = self.subject.value
self.slice = self.slice.value
self.data1 = resize(self.data1, (224, 224), order=3, mode="constant", cval=0, clip=True, preserve_range=True)
self.label1 = resize(self.label1, (224, 224), order=0, mode="edge", cval=0, clip=True, preserve_range=True)
# data augmentation
if self.augmentation!=None:
data_aug, mask_aug = data_augment(self.data1, self.label1, self.augmentation)
else:
data_aug = self.data1
mask_aug = self.label1
data1_norm = (data_aug - data_aug.mean()) / data_aug.std()
image1_out = data1_norm
label1_out = mask_aug.copy()
label1_out_cat = label1_out.astype("float32")
label1_out_cat = label1_out_cat / 255
image1_out = image1_out.transpose((2,0,1))
label1_out_cat = label1_out_cat.transpose((2,0,1))
subject_out = np.array(self.subject1)
slice_out = np.array(self.slice)
return (torch.from_numpy(image1_out).float(),
torch.from_numpy(label1_out_cat).long(),
torch.from_numpy(subject_out).long(),
torch.from_numpy(slice_out).long())
def __len__(self):
return len(self.file_list)
class CTData_test(data.Dataset):
def __init__(self, root_path, augmentation = None):
self.file_list = [x for x in glob.glob(os.path.join(root_path, "*.h5"))]
self.augmentation = augmentation
def __getitem__(self, index):
# read h5 files
h5file = h5py.File(self.file_list[index])
self.data = h5file.get("data")
self.label = h5file.get("label")
self.subject = h5file.get("subject")
self.slice = h5file.get("slice")
self.data1 = self.data.value
self.label1 = self.label.value
self.subject1 = self.subject.value
self.slice = self.slice.value
self.data1 = resize(self.data1, (224, 224), order=3, mode="constant", cval=0, clip=True, preserve_range=True)
self.label1 = resize(self.label1, (224, 224), order=0, mode="edge", cval=0, clip=True, preserve_range=True)
# data augmentation
if self.augmentation!=None:
data_aug, mask_aug = data_augment(self.data1, self.label1, self.augmentation)
else:
data_aug = self.data1
mask_aug = self.label1
data1_norm = (data_aug - data_aug.mean()) / data_aug.std()
image1_out = data1_norm
label1_out = mask_aug.copy()
label1_out_cat = label1_out.astype("float32")
label1_out_cat = label1_out_cat / 255
image1_out = image1_out.transpose((2,0,1))
label1_out_cat = label1_out_cat.transpose((2,0,1))
subject_out = np.array(self.subject1)
slice_out = np.array(self.slice)
return (torch.from_numpy(image1_out).float(),
torch.from_numpy(label1_out_cat).long(),
torch.from_numpy(subject_out).long(),
torch.from_numpy(slice_out).long(),
torch.from_numpy(data_aug).float())
def __len__(self):
return len(self.file_list)
# data augmentation
def data_augment_volume(datalist, augmentation):
# first get the volume data from the data list
image1, mask1 = datalist
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
# Store shapes before augmentation to compare
image1_shape = image1.shape
mask1_shape = mask1.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
# image should be uint8!!
image1 = det.augment_image(image1)
# Change mask to np.uint8 because imgaug doesn't support np.bool
mask1 = det.augment_image(mask1.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert image1.shape == image1_shape, "Augmentation shouldn't change image size"
assert mask1.shape == mask1_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
# masks = masks.astype(np.bool)
return image1, mask1
def data_augment(image, mask, augmentation):
# Augmentation
# This requires the imgaug lib (https://github.com/aleju/imgaug)
if augmentation:
import imgaug
# Augmenters that are safe to apply to masks
# Some, such as Affine, have settings that make them unsafe, so always
# test your augmentation on masks
MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
"Fliplr", "Flipud", "CropAndPad",
"Affine", "PiecewiseAffine"]
def hook(images, augmenter, parents, default):
"""Determines which augmenters to apply to masks."""
return augmenter.__class__.__name__ in MASK_AUGMENTERS
# Store shapes before augmentation to compare
image_shape = image.shape
mask_shape = mask.shape
# Make augmenters deterministic to apply similarly to images and masks
det = augmentation.to_deterministic()
# image should be uint8!!
images = det.augment_image(image)
# Change mask to np.uint8 because imgaug doesn't support np.bool
masks = det.augment_image(mask.astype(np.uint8),
hooks=imgaug.HooksImages(activator=hook))
# Verify that shapes didn't change
assert images.shape == image_shape, "Augmentation shouldn't change image size"
assert masks.shape == mask_shape, "Augmentation shouldn't change mask size"
# Change mask back to bool
# masks = masks.astype(np.bool)
return images, masks
if __name__ == "__main__":
path = "./data_train"
data_generator = CTData(root_path=path)
trainloader = data.DataLoader(data_generator, batch_size=1, shuffle=False)
for i, data in enumerate(trainloader):
img1 = data[0].numpy()
img2 = data[1].numpy()
imgs = np.concatenate((img1, img1, img1), axis=0).transpose(1,2,0)
# cv.waitKey(500)
a = 1
``` |
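The `augmentation` argument of `CTData` is expected to be an imgaug augmenter (see `data_augment` above). A minimal sketch of building one and attaching it to the dataset follows; the augmenter choices, batch size, and import path are assumptions, not the training configuration used in the original work.
```python
# Geometric transforms in the MASK_AUGMENTERS whitelist are applied to both the
# image and the label, while intensity transforms such as Multiply only touch the image.
import imgaug.augmenters as iaa
import torch.utils.data as data
from data.datagenerator import CTData  # assumed import path

augmentation = iaa.SomeOf((0, 2), [
    iaa.Fliplr(0.5),               # horizontal flip (image + mask)
    iaa.Affine(rotate=(-10, 10)),  # small rotation (image + mask)
    iaa.Multiply((0.9, 1.1)),      # brightness jitter (image only)
])

train_set = CTData(root_path="./data_train", augmentation=augmentation)
train_loader = data.DataLoader(train_set, batch_size=4, shuffle=True)
for images, labels, subjects, slices in train_loader:
    pass  # feed the batch to the network here
```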
{
"source": "johnleeming/gpx-csv-converter",
"score": 3
} |
#### File: gpx-csv-converter/gpx_csv_converter/__init__.py
```python
from xml.dom import minidom
import csv
import os
class Converter:
def __init__(self, **kwargs):
input_file_name = kwargs.get("input_file")
output_file_name = kwargs.get("output_file")
if not input_file_name or not output_file_name:
raise TypeError("You must specify an input and output file")
input_file_abs_path = os.path.abspath(input_file_name)
input_file_exists = os.path.exists(input_file_abs_path)
if not input_file_exists:
raise TypeError("The file %s does not exist." % input_file_name)
input_extension = os.path.splitext(input_file_name)[1]
if input_extension != ".gpx":
raise TypeError("Input file must be a GPX file")
output_extension = os.path.splitext(output_file_name)[1]
if output_extension != ".csv":
raise TypeError("Output file must be a CSV file")
with open(input_file_abs_path, "r") as gpx_in:
gpx_string = gpx_in.read()
self.convert(gpx_string, output_file_name)
def convert(self, gpx_string, output_file_name):
mydoc = minidom.parseString(gpx_string)
trkpt = mydoc.getElementsByTagName("trkpt")
row_list = []
columns = ["timestamp", "latitude", "longitude", "elevation", "heart_rate"]
# define type of heart rate field
# garmin and other providers may have different elements for the hr field
potential_fields_hr=["ns3:hr","gpxtpx:hr"]
heart_rate_field=potential_fields_hr[0] # default
for potential_field in potential_fields_hr:
if len(mydoc.getElementsByTagName(potential_field))>0:
heart_rate_field=potential_field
# parse trackpoint elements. Search for child elements in each trackpoint so they stay in sync.
for elem in trkpt:
etimestamp=elem.getElementsByTagName("time")
timestamp=None
for selem in etimestamp:
timestamp=(selem.firstChild.data)
lat=(elem.attributes["lat"].value)
lng=(elem.attributes["lon"].value)
eelevation=elem.getElementsByTagName("ele")
elevation=None
for selem in eelevation:
elevation=(selem.firstChild.data)
eheart_rate=elem.getElementsByTagName(heart_rate_field)
heart_rate=None
for selem in eheart_rate:
heart_rate=(selem.firstChild.data)
            # cadence lives in the same extension namespace as the heart rate field
            cadence_field=heart_rate_field.replace("hr", "cad")
            ecadence=elem.getElementsByTagName(cadence_field)
            cadence=None
            for selem in ecadence:
                cadence=(selem.firstChild.data)
this_row = [timestamp, lat, lng, elevation, heart_rate, cadence]
row_list.append(this_row)
        with open(output_file_name, "w", newline="") as output_file:
writer = csv.writer(output_file)
writer.writerow(columns)
writer.writerows(row_list)
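# Example usage (a minimal sketch; the file names are illustrative only):
#
#   from gpx_csv_converter import Converter
#   Converter(input_file="morning_run.gpx", output_file="morning_run.csv")
#
# The constructor validates the extensions, parses every <trkpt> element and writes
# one CSV row per trackpoint with timestamp, position, elevation, heart rate and
# cadence.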
``` |
{
"source": "johnleeming/tea-2",
"score": 3
} |
#### File: johnleeming/tea-2/tea-2v0.95.py
```python
import os
import re
import subprocess
import tkinter as tk
from tkinter import filedialog
from tkinter import scrolledtext
from tkinter import *
from datetime import datetime
import logging
home_path = os.path.expanduser('~/')
word_file = home_path + 'word_lists/default.txt'
text_font = "liberation sans"
text_size = 14
bgcolour = {'dark': 'black', 'light': 'white', 'alarm': 'red'}
fgcolour = {'dark': 'white', 'light': 'black', 'alarm': 'black'}
buttonbg = {'dark': 'darkgrey', 'light': 'lightgrey', 'alarm': 'darkgrey'}
theme = 'light'
winheight = 15 * text_size + 20
winwidth = 1000
rwinheight = 850 - winheight
winx = 100
winy = 0
rwiny = winy + winheight + 65
paddingh = 5
paddingv = 5
hint_text = "[abc] - one of the listed letters | . any character | * 0 or more | + 1 or more | ? optional | " \
"(a|b) a or b | ^ beginning | $ end "
log_file = home_path + 'logs/tea-2.log'
logging.basicConfig(filename=log_file, level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
punctuation = {33: None, 34: None, 39: None, 40: None, 41: None, 42: None, 44: None, 45: None, 58: None, 59: None,
94: None, 95: None, 96: None}
match_word = []
match_list = []
is_error = False
greek_character_names = []
with open(home_path + 'tea-2/greek_characters.txt', 'r') as i_file:
for line in i_file:
greek_character_names.append(line[:-1])
def choose_list():
global word_list, load_message
in_file_name = filedialog.askopenfilename(initialdir=home_path + 'word_lists/', title="Select wordlist",
filetypes=(("Text files", "*.txt"), ("all files", "*.*")))
word_list, load_message = load_list(in_file_name)
load_button_message.set(load_message)
return
def load_list(filename):
temp_list = []
with open(filename, 'r') as input_file:
for line in input_file:
temp_list.append(line[:-1])
load_message = 'Using ' + os.path.basename(filename) + ' (' + str(len(temp_list)) + ' words).'
temp_list.sort(key=lambda x: x.lower())
return temp_list, load_message
def check_word(list, word):
try:
ind = list.index(word)
except Exception:
ind = -1
return ind
def contains_greek_character(w):
for gr_ch in greek_character_names:
if gr_ch in w:
return True
return False
def find_matches(query, list, min, max, ignore_punct, case_sense, start, end, n_words, c_gr_ch):
start_time = datetime.now()
matches = []
no_matches = 0
if start == -1: # if nothing entered in start field, start from beginning
start = 0
if end == -1: # if nothing entered in end field continue to end
end = len(list)
k = start
while k < end:
i = list[k]
j = i
if ignore_punct:
j = j.translate(punctuation)
if not case_sense:
j = j.lower()
words_in_i = i.count(' ') + 1
if query.match(j) and (min <= len(j) <= max):
if words_in_i == int(n_words):
if c_gr_ch:
if contains_greek_character(j):
matches.append(i)
no_matches += 1
else:
matches.append(i)
no_matches += 1
k += 1
end_time = datetime.now()
time_taken = end_time - start_time
return matches, no_matches, time_taken
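# Example (illustrative data, separate from the GUI): find single words of 5-8
# letters starting with "ca", ignoring punctuation and case.
#
#   pattern = re.compile("ca.*")
#   words = ["cable", "Cactus", "zebra", "carpet-bag"]
#   hits, n_hits, elapsed = find_matches(pattern, words, 5, 8, True, False,
#                                        0, len(words), 1, False)
#   # hits == ["cable", "Cactus"], n_hits == 2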
def display_results(matches, no_matches, first, time_text, no_results_text):
global match_word, definition_box, results_window
results_window = tk.Toplevel()
results_window.title('Results')
results_window['bg'] = bgcolour[theme]
results_window.geometry('%dx%d+%d+%d' % (winwidth, rwinheight, winx, rwiny))
results_window.grid_columnconfigure(0, weight=1)
results_window.grid_columnconfigure(1, weight=1)
results_window.grid_columnconfigure(2, weight=1)
results_window.grid_columnconfigure(3, weight=1)
solution_time_label = tk.Label(results_window, text=time_text, font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
solution_time_label.grid(row=10, column=0, columnspan=2, sticky='ew')
no_results_label = tk.Label(results_window, text=no_results_text, font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
no_results_label.grid(row=10, column=2, columnspan=2, sticky='ew')
definition_box = scrolledtext.ScrolledText(results_window, background=bgcolour[theme], relief=SOLID, borderwidth=1,
font=(text_font, text_size - 2), fg=fgcolour[theme], wrap='word',
height=12, width=100)
definition_box.grid(row=50, column=0, columnspan=4)
    # Clear any buttons left over from a previous results window
    i = 0
    while i < len(match_word):
        match_word[i].grid_forget()
        i += 1
    match_word.clear()
root.update()
if no_matches - first < 39:
last = no_matches - 1
else:
last = first + 39
i = 0
while i <= last - first:
column_no = int(i / 10)
row_no = 20 + i - column_no * 10
match_word.append(tk.Button(results_window, text=matches[first + i], font=(text_font, text_size - 1),
command=lambda b=i: toggle(b)))
match_word[i].configure(anchor='w', relief='raised')
match_word[i].grid(row=row_no, column=column_no, sticky='ew')
i += 1
def get_definition(word):
try:
response = subprocess.run(['dict', word, ], capture_output=True)
if len(response.stdout) > 0:
definition = response.stdout
else:
definition = 'No definitions found.'
logging.debug(response.stderr)
    except Exception as e:
        definition = 'Lookup failed'
        logging.info('dict lookup failed: %s', e)
return definition
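# Note (illustrative): get_definition shells out to the external `dict` client, so
# get_definition("serendipity") returns the raw dict output as bytes when that
# client is installed, and 'No definitions found.' or 'Lookup failed' otherwise.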
def display_error(error_message):
error_state = tk.StringVar()
error_window = tk.Toplevel()
error_window.title('Error')
error_window['bg'] = bgcolour[theme]
error_window.geometry('%dx%d+%d+%d' % (winwidth / 4, winheight, winx + winwidth, winy))
error_label = tk.Label(error_window, textvar=error_state, font=(text_font, text_size), bg=bgcolour[theme],
fg='red', wraplength=(winwidth / 4 - 10)).pack()
logging.exception(error_message)
error_state.set(error_message)
def go_enter(event):
go()
def go():
start_no = 0
global match_list, results_window, error_window
try:
results_window.destroy()
except Exception:
pass
try:
error_window.destroy()
except Exception:
pass
query = input_query.get()
min_len = min_length.get()
max_len = max_length.get()
a_start = alpha_start.get()
start_ind = check_word(word_list, a_start)
n_words = num_words.get()
if a_start != '' and start_ind == -1:
display_error('Start word not in list, will start from beginning.')
a_end = alpha_end.get()
end_ind = check_word(word_list, a_end)
if a_end != '' and end_ind == -1:
display_error('End word not in list, will continue to end.')
ignore_punct = ignore_punctuation.get()
case_sensitivity = case_sensitive.get()
contains_gr_ch = contains_greek_char.get()
if not case_sensitivity:
query = query.lower()
try:
re_query = re.compile(query)
match_list, no_matches, search_time = find_matches(re_query, word_list, min_len, max_len, ignore_punct,
case_sensitivity, start_ind, end_ind, n_words,
contains_gr_ch)
time_text = 'search took: ' + str(search_time.total_seconds()) + ' seconds'
no_results_text = str(no_matches) + ' matches found'
if no_matches > 40:
no_results_text += ' (first 40 displayed)'
match_list.sort()
display_results(match_list, no_matches, start_no, time_text, no_results_text)
except re.error as error_message:
display_error(error_message)
except Exception:
display_error('Something went wrong.')
def toggle(button_no):
global match_word, definition_box
definition_box.delete(1.0, END)
if match_word[button_no].config('relief')[-1] == 'raised':
match_word[button_no].config(relief="sunken")
definition_text = get_definition(match_list[button_no])
definition_box.insert(1.0, definition_text)
i = 0
while i < len(match_word):
if i != button_no and match_word[i].config('relief')[-1] == 'sunken':
match_word[i].config(relief='raised')
i += 1
else:
match_word[button_no].config(relief="raised")
root = tk.Tk()
word_list, load_message = load_list(word_file)
root.title('Regex-based Word Search')
root['bg'] = bgcolour[theme]
root.geometry('%dx%d+%d+%d' % (winwidth, winheight, winx, winy))
root.grid_columnconfigure(0, weight=1)
root.grid_columnconfigure(1, weight=1)
root.grid_columnconfigure(2, weight=1)
root.grid_columnconfigure(3, weight=1)
load_button_message = tk.StringVar()
load_button_message.set(load_message)
load_message_button = tk.Button(root, textvar=load_button_message, font=(text_font, text_size - 1), bg=buttonbg[theme],
fg=fgcolour[theme], command=choose_list)
load_message_button.grid(row=0, sticky='wn', column=0, columnspan=2, padx=paddingh, pady=paddingv)
ignore_punctuation = tk.BooleanVar()
punctuation_checkbox = tk.Checkbutton(root, text='Ignore Punctuation', variable=ignore_punctuation, onvalue=True,
offvalue=False, font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
punctuation_checkbox.grid(row=0, sticky='wn', column=3, padx=paddingh, pady=paddingv)
punctuation_checkbox.select()
case_sensitive = tk.BooleanVar()
case_sensitive_checkbox = tk.Checkbutton(root, text='Case Sensitive', variable=case_sensitive, onvalue=True,
offvalue=False, font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
case_sensitive_checkbox.grid(row=0, sticky='wn', column=2, padx=paddingh, pady=paddingv)
prompt = tk.Label(root, text='Enter Regex query: ', font=(text_font, text_size), bg=bgcolour[theme], fg=fgcolour[theme])
prompt.grid(row=1, column=0, padx=paddingh, pady=paddingv)
input_query = tk.StringVar()
query_entry = tk.Entry(root, textvariable=input_query, font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
query_entry.grid(row=1, column=1, columnspan=2, sticky='ew')
query_entry.bind('<Return>', go_enter)
enter_button = tk.Button(root, text="Go", font=(text_font, text_size), bg=buttonbg[theme], fg=fgcolour[theme],
command=go).grid(row=1, column=3, padx=paddingh, pady=paddingv, sticky='ew')
hint_label = tk.Label(root, text=hint_text, font=(text_font, text_size - 2), bg=bgcolour[theme], fg=fgcolour[theme])
hint_label.grid(row=3, sticky='wn', column=0, columnspan=4, padx=paddingh, pady=paddingv)
min_length_label = tk.Label(root, text='Minimum length: ', font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
min_length_label.grid(row=4, column=0, padx=paddingh, pady=paddingv, sticky='ew')
min_length = tk.IntVar()
min_length.set(3)
min_length_entry = tk.Entry(root, textvariable=min_length, font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
min_length_entry.grid(row=4, column=1, padx=paddingh, pady=paddingv, sticky='ew')
max_length_label = tk.Label(root, text='Maximum length: ', font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
max_length_label.grid(row=4, column=2, padx=paddingh, pady=paddingv, sticky='ew')
max_length = tk.IntVar()
max_length.set(12)
max_length_entry = tk.Entry(root, textvariable=max_length, font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
max_length_entry.grid(row=4, column=3, padx=paddingh, pady=paddingv, sticky='ew')
alpha_start_label = tk.Label(root, text='Alphabetic start: ', font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
alpha_start_label.grid(row=5, column=0, padx=paddingh, pady=paddingv, sticky='ew')
alpha_start = tk.StringVar()
alpha_start_entry = tk.Entry(root, textvariable=alpha_start, font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
alpha_start_entry.grid(row=5, column=1, padx=paddingh, pady=paddingv, sticky='ew')
alpha_end_label = tk.Label(root, text='Alphabetic End: ', font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
alpha_end_label.grid(row=5, column=2, padx=paddingh, pady=paddingv, sticky='ew')
alpha_end = tk.StringVar()
alpha_end_entry = tk.Entry(root, textvariable=alpha_end, font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
alpha_end_entry.grid(row=5, column=3, padx=paddingh, pady=paddingv, sticky='ew')
num_words_label = tk.Label(root, text='Number of words:', font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
num_words_label.grid(row=6, column=0, padx=paddingh, pady=paddingv, sticky='ew')
num_words = tk.IntVar(value=1)
num_words_entry = tk.Entry(root, textvariable=num_words, font=(text_font, text_size), bg=bgcolour[theme], fg=fgcolour[theme])
num_words_entry.grid(row=6, column=1, padx=paddingh, pady=paddingv, sticky='ew')
contains_greek_char = tk.BooleanVar()
contains_greek_char_checkbox = tk.Checkbutton(root, text='Contains Greek', variable=contains_greek_char, onvalue=True,
offvalue=False, font=(text_font, text_size), bg=bgcolour[theme],
fg=fgcolour[theme])
contains_greek_char_checkbox.grid(row=6, sticky='wn', column=2, padx=paddingh, pady=paddingv)
root.mainloop()
``` |
{
"source": "johnlees/mandrake",
"score": 3
} |
#### File: mandrake/mandrake/plot.py
```python
import sys
import operator
from collections import defaultdict
from functools import partial
import pandas as pd
import numpy as np
from tqdm import tqdm
import plotly.express as px
import plotly.graph_objects as go
import matplotlib as mpl
mpl.use('Agg')
#mpl.rcParams.update({'font.size': 8})
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.animation as animation
from .utils import norm_and_centre
from .sound import encode_audio
# Interactive HTML plot using plotly
def plotSCE_html(embedding, names, labels, output_prefix, hover_labels=True, dbscan=True, seed=42):
if dbscan:
not_noise = labels != -1
not_noise_list = list(np.where(not_noise)[0])
plot_df = pd.DataFrame({'SCE dimension 1': embedding[not_noise, 0],
'SCE dimension 2': embedding[not_noise, 1],
'names': [names[i] for i in not_noise_list],
'Label': [str(labels[x]) for x in not_noise_list]})
else:
plot_df = pd.DataFrame({'SCE dimension 1': embedding[:, 0],
'SCE dimension 2': embedding[:, 1],
'names': names,
'Label': [str(x) for x in labels]})
random_colour_map = {}
rng = np.random.default_rng(seed=seed)
for label in sorted(pd.unique(plot_df['Label'])):
# Alternative approach with hsl representation
# from hsluv import hsluv_to_hex ## outside of loop
# hue = rng.uniform(0, 360)
# saturation = rng.uniform(60, 100)
# luminosity = rng.uniform(50, 90)
# random_colour_map[label] = hsluv_to_hex([hue, saturation, luminosity])
# Random in rbg seems to give better contrast
rgb = rng.integers(low=0, high=255, size=3)
random_colour_map[label] = ",".join(["rgb(" + str(rgb[0]),
str(rgb[1]),
str(rgb[2]) + ")"])
# Plot clustered points
fig = px.scatter(plot_df, x="SCE dimension 1", y="SCE dimension 2",
hover_name='names' if hover_labels else None,
color='Label',
color_discrete_map=random_colour_map,
render_mode='webgl')
fig.layout.update(showlegend=False)
fig.update_traces(marker=dict(size=10,
line=dict(width=2,
color='DarkSlateGrey')),
text=plot_df['names'] if hover_labels else None,
hoverinfo='text' if hover_labels else None,
opacity=1.0,
selector=dict(mode='markers'))
if dbscan:
# Plot noise points
fig.add_trace(
go.Scattergl(
mode='markers',
x=embedding[labels == -1, 0],
y=embedding[labels == -1, 1],
text=[names[i] for i in list(np.where(labels == -1)[0])] if hover_labels else None,
hoverinfo='text' if hover_labels else None,
opacity=0.5,
marker=dict(
color='black',
size=8
),
showlegend=False
)
)
fig.write_html(output_prefix + '.embedding.html')
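# Example (a sketch with toy data):
#
#   rng = np.random.default_rng(0)
#   emb = rng.normal(size=(100, 2))
#   labels = np.repeat([0, 1, 2, 3, -1], 20)
#   names = ["sample%d" % i for i in range(100)]
#   plotSCE_html(emb, names, labels, "toy", hover_labels=True, dbscan=True)
#
# writes toy.embedding.html, with the points labelled -1 (noise) drawn in black.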
# Hexagon density plot to see overplotting
def plotSCE_hex(embedding, output_prefix):
# Set up figure with scale bar
plt.figure(figsize=(8, 8), dpi=320, facecolor='w', edgecolor='k')
ax = plt.subplot()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
# Hex plot
hb = ax.hexbin(embedding[:, 0], embedding[:, 1],
mincnt=1, gridsize=50, cmap='inferno')
# Colour bar
cbar = plt.colorbar(hb, cax=cax)
cbar.set_label('Samples')
# Draw the plot
ax.set_title('Embedding density')
ax.set_xlabel('SCE dimension 1')
ax.set_ylabel('SCE dimension 2')
plt.savefig(output_prefix + ".embedding_density.pdf")
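# Example (sketch): plotSCE_hex(emb, "toy") writes toy.embedding_density.pdf; the
# hexbin view makes overplotting visible where the scatter plots saturate.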
# Matplotlib static plot, and animation if available
def plotSCE_mpl(embedding, results, labels, output_prefix, sound=False,
threads=1, dbscan=True, seed=42):
# Set the style by group
if embedding.shape[0] > 10000:
pt_scale = 1.5
elif embedding.shape[0] > 1000:
pt_scale = 3
else:
pt_scale = 7
# If labels are strings
unique_labels = set(labels)
if not isinstance(labels, np.ndarray):
labels = np.array(labels, dtype="object")
rng = np.random.default_rng(seed=seed)
style_dict = defaultdict(dict)
for k in sorted(unique_labels):
if k == -1 and dbscan:
style_dict['ptsize'][k] = 1 * pt_scale
style_dict['col'][k] = 'k'
style_dict['mec'][k] = None
style_dict['mew'][k] = 0
else:
style_dict['ptsize'][k] = 2 * pt_scale
style_dict['col'][k] = tuple(rng.uniform(size=3))
style_dict['mec'][k] = 'k' if embedding.shape[0] <= 10000 else None
style_dict['mew'][k] = 0.2 * pt_scale if embedding.shape[0] <= 10000 else 0
# Static figure is a scatter plot, drawn by class
plt.figure(figsize=(8, 8), dpi=320, facecolor='w', edgecolor='k')
cluster_sizes = {}
for k in sorted(unique_labels):
class_member_mask = (labels == k)
xy = embedding[class_member_mask]
cluster_sizes[k] = xy.shape[0]
plt.plot(xy[:, 0], xy[:, 1], '.',
color=style_dict['col'][k],
markersize=style_dict['ptsize'][k],
mec=style_dict['mec'][k],
mew=style_dict['mew'][k])
# plot output
if dbscan:
plt.title('HDBSCAN – estimated number of spatial clusters: %d' % (len(unique_labels) - 1))
plt.xlabel('SCE dimension 1')
plt.ylabel('SCE dimension 2')
plt.savefig(output_prefix + ".embedding_static.png")
plt.close()
# Make animation
if results.animated():
sys.stderr.write("Creating animation\n")
plt.style.use('dark_background')
fig = plt.figure(facecolor='k', edgecolor='w', constrained_layout=True)
fig.set_size_inches(16, 8, True)
gs = fig.add_gridspec(2, 2)
ax_em = fig.add_subplot(gs[:, 0])
ax_em.set_xlabel('SCE dimension 1')
ax_em.set_ylabel('SCE dimension 2')
ax_eq = fig.add_subplot(gs[1, 1])
ax_eq.set_xlabel('Iteration')
ax_eq.set_ylabel('Eq')
ax_eq.set_ylim(bottom=0)
ax_leg = fig.add_subplot(gs[0, 1])
ax_leg.axis("off")
        # Set a legend, up to thirty classes (the 30 largest by size)
cluster_sizes = sorted(cluster_sizes.items(),
key=operator.itemgetter(1), reverse=True)
for idx, sizes in enumerate(cluster_sizes):
k = sizes[0]
if idx < 30:
style_dict['label'][k] = str(k) + " (" + str(sizes[1]) + ")"
else:
style_dict['label'][k] = None
ims = []
iter_series, eq_series = results.get_eq()
for frame in tqdm(range(results.n_frames()), unit="frames"):
            animated = frame > 0
# Eq plot at bottom, for current frame
eq_im, = ax_eq.plot(iter_series[0:(frame+1)], eq_series[0:(frame+1)],
color='cornflowerblue', lw=2, animated=animated)
frame_ims = [eq_im]
# Scatter plot at top, for current frame
embedding = np.array(results.get_embedding_frame(frame)).reshape(-1, 2)
norm_and_centre(embedding)
for k in set(labels):
class_member_mask = (labels == k)
xy = embedding[class_member_mask]
im, = ax_em.plot(xy[:, 0], xy[:, 1], '.',
color=style_dict['col'][k],
markersize=style_dict['ptsize'][k],
mec=style_dict['mec'][k],
mew=style_dict['mew'][k],
label=style_dict['label'][k],
animated=animated)
frame_ims.append(im)
# Legend is the same every frame
if frame == 0:
h, l = ax_em.get_legend_handles_labels()
legend = ax_leg.legend(h, l, borderaxespad=0, loc="center",
ncol=4, markerscale=7/pt_scale,
mode="expand", title="30 largest classes (size)")
frame_ims.append(legend)
# All axes make the frame
ims.append(frame_ims)
# Write the animation (list of lists) to an mp4
fps = 20
ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
repeat=False)
writer = animation.FFMpegWriter(
fps=fps, metadata=dict(title='Mandrake animation'), bitrate=-1)
progress_callback = \
lambda i, n: sys.stderr.write('Saving frame ' + str(i) + ' of ' + str(len(ims)) + ' \r')
ani.save(output_prefix + ".embedding_animation.mp4", writer=writer,
dpi=320, progress_callback=progress_callback)
progress_callback(len(ims), len(ims))
sys.stderr.write("\n")
# Get sound for the video
if sound:
sys.stderr.write("Generating sound\n")
encode_audio(results, output_prefix + ".embedding_animation.mp4",
len(ims) / fps, threads=threads)
```
#### File: mandrake/mandrake/sound.py
```python
import os
import subprocess
from tempfile import mkstemp
import numpy as np
from scipy.io.wavfile import write as write_wav
from .utils import norm_and_centre
from SCE import gen_audio
def encode_audio(results, video_file, total_duration, sample_rate=44100, threads=1):
# Extract oscillator frequencies from data
em_prev = np.array(results.get_embedding_frame(0)).reshape(-1, 2)
norm_and_centre(em_prev)
freqs = np.zeros((results.n_frames() - 1, 2))
for frame in range(1, results.n_frames()):
em_next = np.array(results.get_embedding_frame(frame)).reshape(-1, 2)
norm_and_centre(em_next)
freqs[frame - 1, :] = np.max(np.abs(em_prev - em_next), axis=0)
em_prev = em_next
# Normalise to 120-1200Hz
freqs -= np.min(freqs)
freqs /= np.max(freqs)
freqs = 120 + 1200 * freqs
# Encode
    # _freq_to_wave already normalises to 16-bit signed ints, so the two channels
    # only need encoding and stacking
    x_audio = _freq_to_wave(list(freqs[:, 0]), total_duration, sample_rate, threads)
    y_audio = _freq_to_wave(list(freqs[:, 1]), total_duration, sample_rate, threads)
    audio = np.column_stack((x_audio, y_audio))
# Save the audio as an uncompressed WAV
wav_tmp = mkstemp(suffix=".wav")[1]
write_wav(wav_tmp, sample_rate, audio)
# Compress (aac) and add to the video
vid_tmp = mkstemp(suffix=".mp4")[1]
try:
subprocess.run("ffmpeg -y -i " + video_file + " -i " + wav_tmp + \
" -shortest -c:v copy -map 0:v:0 -map 1:a:0 -c:a aac -b:a 192k " + \
vid_tmp, shell=True, check=True)
except subprocess.CalledProcessError as e:
os.remove(vid_tmp)
os.remove(wav_tmp)
raise e
# Sort out tmp files so output is correct
os.rename(vid_tmp, video_file)
os.remove(wav_tmp)
# internal functions
# Create a list of oscillators across the time series
# Normalise amplitude based on 16-bit signed ints
def _freq_to_wave(freq_list, duration, sample_rate, threads):
audio = np.array(gen_audio(freq_list, duration, sample_rate, threads))
audio *= np.iinfo(np.int16).max / np.max(np.abs(audio))
audio = audio.astype(np.int16, copy=False)
return audio
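# Example (illustrative; the exact waveform depends on SCE.gen_audio): build a
# short two-channel clip from two oscillator frequencies per channel and write it
# as an uncompressed WAV, mirroring what encode_audio does before the ffmpeg mux.
#
#   left = _freq_to_wave([220.0, 440.0], 2.0, 44100, 1)
#   right = _freq_to_wave([440.0, 220.0], 2.0, 44100, 1)
#   write_wav("glide.wav", 44100, np.column_stack((left, right)))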
``` |
{
"source": "johnlees/PopPUNK",
"score": 2
} |
#### File: PopPUNK/PopPUNK/assign.py
```python
import os
import sys
# additional
import numpy as np
import subprocess
from collections import defaultdict
import scipy.optimize
from scipy.sparse import coo_matrix, bmat, find
# required from v2.1.1 onwards (no mash support)
import pp_sketchlib
# import poppunk package
from .__init__ import __version__
from .models import rankFile
#*******************************#
#* *#
#* query assignment *#
#* *#
#*******************************#
def assign_query(dbFuncs,
ref_db,
q_files,
output,
qc_dict,
update_db,
write_references,
distances,
threads,
overwrite,
plot_fit,
graph_weights,
max_a_dist,
max_pi_dist,
type_isolate,
model_dir,
strand_preserved,
previous_clustering,
external_clustering,
core,
accessory,
gpu_sketch,
gpu_dist,
gpu_graph,
deviceid,
web,
json_sketch,
save_partial_query_graph):
"""Code for assign query mode. Written as a separate function so it can be called
by web APIs"""
# Modules imported here as graph tool is very slow to load (it pulls in all of GTK?)
from .models import loadClusterFit, ClusterFit, BGMMFit, DBSCANFit, RefineFit, LineageFit
from .sketchlib import removeFromDB
from .network import fetchNetwork
from .network import construct_network_from_edge_list
from .network import extractReferences
from .network import addQueryToNetwork
from .network import printClusters
from .network import save_network
from .network import get_vertex_list
from .plot import writeClusterCsv
from .prune_db import prune_distance_matrix
from .sketchlib import addRandom
from .utils import storePickle
from .utils import readPickle
from .utils import qcDistMat
from .utils import update_distance_matrices
from .utils import createOverallLineage
from .web import sketch_to_hdf5
createDatabaseDir = dbFuncs['createDatabaseDir']
constructDatabase = dbFuncs['constructDatabase']
joinDBs = dbFuncs['joinDBs']
queryDatabase = dbFuncs['queryDatabase']
readDBParams = dbFuncs['readDBParams']
getSeqsInDb = dbFuncs['getSeqsInDb']
sys.stderr.write("Mode: Assigning clusters of query sequences\n\n")
if ref_db == output:
sys.stderr.write("--output and --ref-db must be different to "
"prevent overwrite.\n")
sys.exit(1)
if (update_db and not distances):
sys.stderr.write("--update-db requires --distances to be provided\n")
sys.exit(1)
# Load the previous model
model_prefix = ref_db
if model_dir is not None:
model_prefix = model_dir
model_file = model_prefix + "/" + os.path.basename(model_prefix) + "_fit"
model = loadClusterFit(model_file + '.pkl',
model_file + '.npz')
model.set_threads(threads)
# Set directories of previous fit
if previous_clustering is not None:
prev_clustering = previous_clustering
else:
prev_clustering = model_prefix
# Find distances to reference db
kmers, sketch_sizes, codon_phased = readDBParams(ref_db)
# Iterate through different types of model fit with a refined model when specified
# Core and accessory assignments use the same model and same overall set of distances
# but have different networks, references, reference distances and assignments
fit_type_list = ['default']
if model.type == 'refine' and model.indiv_fitted:
if core:
fit_type_list.append('core_refined')
if accessory:
fit_type_list.append('accessory_refined')
for fit_type in fit_type_list:
# Define file name extension
file_extension_string = ''
if fit_type != 'default':
file_extension_string = '_' + fit_type
# Find distances vs ref seqs
rNames = []
ref_file_name = os.path.join(model_prefix,
os.path.basename(model_prefix) + file_extension_string + ".refs")
use_ref_graph = \
os.path.isfile(ref_file_name) and not update_db and model.type != 'lineage'
if use_ref_graph:
with open(ref_file_name) as refFile:
for reference in refFile:
rNames.append(reference.rstrip())
else:
if os.path.isfile(distances + ".pkl"):
rNames = readPickle(distances, enforce_self = True, distances=False)[0]
elif update_db:
sys.stderr.write("Reference distances missing, cannot use --update-db\n")
sys.exit(1)
else:
rNames = getSeqsInDb(os.path.join(ref_db, os.path.basename(ref_db) + ".h5"))
# construct database - use a single database directory for all query outputs
if (web and json_sketch is not None):
qNames = sketch_to_hdf5(json_sketch, output)
elif (fit_type == 'default'):
# construct database
createDatabaseDir(output, kmers)
qNames = constructDatabase(q_files,
kmers,
sketch_sizes,
output,
threads,
overwrite,
codon_phased = codon_phased,
calc_random = False,
use_gpu = gpu_sketch,
deviceid = deviceid)
if (fit_type == 'default' or (fit_type != 'default' and use_ref_graph)):
# run query
qrDistMat = queryDatabase(rNames = rNames,
qNames = qNames,
dbPrefix = ref_db,
queryPrefix = output,
klist = kmers,
self = False,
number_plot_fits = plot_fit,
threads = threads,
use_gpu = gpu_dist)
# QC distance matrix
if qc_dict['run_qc']:
seq_names_passing = qcDistMat(qrDistMat, rNames, qNames, ref_db, output, qc_dict)[0]
else:
seq_names_passing = rNames + qNames
# Load the network based on supplied options
genomeNetwork, old_cluster_file = \
fetchNetwork(prev_clustering,
model,
rNames,
ref_graph = use_ref_graph,
core_only = (fit_type == 'core_refined'),
accessory_only = (fit_type == 'accessory_refined'),
use_gpu = gpu_graph)
if max(get_vertex_list(genomeNetwork, use_gpu = gpu_graph)) != (len(rNames) - 1):
sys.stderr.write("There are " + str(max(get_vertex_list(genomeNetwork, use_gpu = gpu_graph)) + 1) + \
" vertices in the network but " + str(len(rNames)) + " reference names supplied; " + \
"please check the '--model-dir' variable is pointing to the correct directory\n")
if model.type == 'lineage':
# Assign lineages by calculating query-query information
addRandom(output, qNames, kmers, strand_preserved, overwrite, threads)
qqDistMat = queryDatabase(rNames = qNames,
qNames = qNames,
dbPrefix = output,
queryPrefix = output,
klist = kmers,
self = True,
number_plot_fits = 0,
threads = threads,
use_gpu = gpu_dist)
model.extend(qqDistMat, qrDistMat)
genomeNetwork = {}
isolateClustering = defaultdict(dict)
for rank in model.ranks:
assignment = model.assign(rank)
# Overwrite the network loaded above
if graph_weights:
weights = model.edge_weights(rank)
else:
weights = None
genomeNetwork[rank] = construct_network_from_edge_list(rNames + qNames,
rNames + qNames,
edge_list = assignment,
weights = weights,
use_gpu = gpu_graph,
summarise = False)
isolateClustering[rank] = \
printClusters(genomeNetwork[rank],
rNames + qNames,
printCSV = False,
use_gpu = gpu_graph)
overall_lineage = createOverallLineage(model.ranks, isolateClustering)
writeClusterCsv(
output + "/" + os.path.basename(output) + '_lineages.csv',
rNames + qNames,
rNames + qNames,
overall_lineage,
output_format = 'phandango',
epiCsv = None,
queryNames = qNames,
suffix = '_Lineage')
else:
# Assign these distances as within or between strain
if fit_type == 'default':
queryAssignments = model.assign(qrDistMat)
dist_type = 'euclidean'
elif fit_type == 'core_refined':
queryAssignments = model.assign(qrDistMat, slope = 0)
dist_type = 'core'
elif fit_type == 'accessory_refined':
queryAssignments = model.assign(qrDistMat, slope = 1)
dist_type = 'accessory'
# Assign clustering by adding to network
if graph_weights:
weights = qrDistMat
else:
weights = None
genomeNetwork, qqDistMat = \
addQueryToNetwork(dbFuncs,
rNames,
qNames,
genomeNetwork,
kmers,
queryAssignments,
model,
output,
distances = distances,
distance_type = dist_type,
queryQuery = (update_db and
(fit_type == 'default' or
(fit_type != 'default' and use_ref_graph)
)
),
strand_preserved = strand_preserved,
weights = weights,
threads = threads,
use_gpu = gpu_graph)
output_fn = os.path.join(output, os.path.basename(output) + file_extension_string)
isolateClustering = \
{'combined': printClusters(genomeNetwork,
rNames + qNames,
output_fn,
old_cluster_file,
external_clustering,
write_references or update_db,
use_gpu = gpu_graph)}
# Update DB as requested
dists_out = output + "/" + os.path.basename(output) + ".dists"
if update_db:
# Check new sequences pass QC before adding them
if len(set(seq_names_passing).difference(rNames + qNames)) > 0:
sys.stderr.write("Queries contained outlier distances, "
"not updating database\n")
else:
sys.stderr.write("Updating reference database to " + output + "\n")
# Update the network + ref list (everything) - no need to duplicate for core/accessory
if fit_type == 'default':
joinDBs(ref_db, output, output,
{"threads": threads, "strand_preserved": strand_preserved})
if model.type == 'lineage':
save_network(genomeNetwork[min(model.ranks)],
prefix = output,
suffix = '_graph',
use_gpu = gpu_graph)
# Save sparse distance matrices and updated model
model.outPrefix = os.path.basename(output)
model.save()
else:
graph_suffix = file_extension_string + '_graph'
save_network(genomeNetwork,
prefix = output,
suffix = graph_suffix,
use_gpu = gpu_graph)
# Load the previous distances
refList_loaded, refList_copy, self, rrDistMat = \
readPickle(distances,
enforce_self = True)
# This should now always be true, otherwise both qrDistMat and sparse matrix
# may need reordering
assert(refList_loaded == rNames)
combined_seq, core_distMat, acc_distMat = \
update_distance_matrices(rNames, rrDistMat,
qNames, qrDistMat,
qqDistMat, threads = threads)
assert combined_seq == rNames + qNames
# Get full distance matrix and save
complete_distMat = \
np.hstack((pp_sketchlib.squareToLong(core_distMat, threads).reshape(-1, 1),
pp_sketchlib.squareToLong(acc_distMat, threads).reshape(-1, 1)))
storePickle(combined_seq, combined_seq, True, complete_distMat, dists_out)
# Copy model if needed
if output != model.outPrefix and fit_type == 'default':
model.copy(output)
# Clique pruning
if model.type != 'lineage':
existing_ref_list = []
with open(ref_file_name) as refFile:
for reference in refFile:
existing_ref_list.append(reference.rstrip())
# Extract references from graph
newRepresentativesIndices, newRepresentativesNames, \
newRepresentativesFile, genomeNetwork = \
extractReferences(genomeNetwork,
combined_seq,
output,
outSuffix = file_extension_string,
existingRefs = existing_ref_list,
type_isolate = qc_dict['type_isolate'],
threads = threads,
use_gpu = gpu_graph)
# intersection that maintains order
newQueries = [x for x in qNames if x in frozenset(newRepresentativesNames)]
# could also have newRepresentativesNames in this diff (should be the same) - but want
# to ensure consistency with the network in case of bad input/bugs
nodes_to_remove = set(range(len(combined_seq))).difference(newRepresentativesIndices)
names_to_remove = [combined_seq[n] for n in nodes_to_remove]
if (len(names_to_remove) > 0):
# This function also writes out the new ref distance matrix
dists_suffix = file_extension_string + '.refs.dists'
postpruning_combined_seq, newDistMat = \
prune_distance_matrix(combined_seq, names_to_remove, complete_distMat,
output + "/" + os.path.basename(output) + dists_suffix)
graph_suffix = file_extension_string + '_refs_graph'
save_network(genomeNetwork,
prefix = output,
suffix = graph_suffix,
use_gpu = gpu_graph)
removeFromDB(output, output, names_to_remove)
db_suffix = file_extension_string + '.refs.h5'
os.rename(output + "/" + os.path.basename(output) + ".tmp.h5",
output + "/" + os.path.basename(output) + db_suffix)
# Check that the updated set of references includes all old references, and references added from
# queries; there may be further new references, even from the original database, where paths are
# added between reference isolates in the same component, or new cliques formed
added_references = set(existing_ref_list).union(set(newQueries))
assert set(postpruning_combined_seq).issuperset(added_references), "Error identifying references"
else:
storePickle(rNames, qNames, False, qrDistMat, dists_out)
if save_partial_query_graph:
if model.type == 'lineage':
save_network(genomeNetwork[min(model.ranks)], prefix = output, suffix = '_graph', use_gpu = gpu_graph)
else:
graph_suffix = file_extension_string + '_graph'
save_network(genomeNetwork, prefix = output, suffix = graph_suffix, use_gpu = gpu_graph)
return(isolateClustering)
#******************************#
#* *#
#* Command line parsing *#
#* *#
#******************************#
def get_options():
import argparse
parser = argparse.ArgumentParser(description='Assign isolates to strains (by POPulation Partitioning Using Nucleotide Kmers)',
prog='poppunk_assign')
# input options
iGroup = parser.add_argument_group('Input files')
iGroup.add_argument('--db', required=True, type = str, help='Location of built reference database')
iGroup.add_argument('--query', required=True, help='File listing query input assemblies')
iGroup.add_argument('--distances', help='Prefix of input pickle of pre-calculated distances (if not in --db)')
iGroup.add_argument('--external-clustering', help='File with cluster definitions or other labels '
'generated with any other method.', default=None)
# output options
oGroup = parser.add_argument_group('Output options')
oGroup.add_argument('--output', required=True, help='Prefix for output files (required)')
oGroup.add_argument('--plot-fit', help='Create this many plots of some fits relating k-mer to core/accessory distances '
'[default = 0]', default=0, type=int)
oGroup.add_argument('--write-references', help='Write reference database isolates\' cluster assignments out too',
default=False, action='store_true')
oGroup.add_argument('--update-db', help='Update reference database with query sequences', default=False, action='store_true')
oGroup.add_argument('--overwrite', help='Overwrite any existing database files', default=False, action='store_true')
oGroup.add_argument('--graph-weights', help='Save within-strain Euclidean distances into the graph', default=False, action='store_true')
# comparison metrics
kmerGroup = parser.add_argument_group('Kmer comparison options')
kmerGroup.add_argument('--min-kmer-count', default=0, type=int, help='Minimum k-mer count when using reads as input [default = 0]')
kmerGroup.add_argument('--exact-count', default=False, action='store_true',
help='Use the exact k-mer counter with reads '
'[default = use countmin counter]')
kmerGroup.add_argument('--strand-preserved', default=False, action='store_true',
help='Treat input as being on the same strand, and ignore reverse complement '
'k-mers [default = use canonical k-mers]')
# qc options
qcGroup = parser.add_argument_group('Quality control options for distances')
qcGroup.add_argument('--qc-filter', help='Behaviour following sequence QC step: "stop" [default], "prune"'
' (analyse data passing QC), or "continue" (analyse all data)',
default='stop', type = str, choices=['stop', 'prune', 'continue'])
qcGroup.add_argument('--retain-failures', help='Retain sketches of genomes that do not pass QC filters in '
'separate database [default = False]', default=False, action='store_true')
qcGroup.add_argument('--max-a-dist', help='Maximum accessory distance to permit [default = 0.5]',
default = 0.5, type = float)
qcGroup.add_argument('--max-pi-dist', help='Maximum core distance to permit [default = 0.5]',
default = 0.5, type = float)
qcGroup.add_argument('--type-isolate', help='Isolate from which distances can be calculated for pruning [default = None]',
default = None, type = str)
qcGroup.add_argument('--length-sigma', help='Number of standard deviations of length distribution beyond '
'which sequences will be excluded [default = 5]', default = None, type = int)
qcGroup.add_argument('--length-range', help='Allowed length range, outside of which sequences will be excluded '
'[two values needed - lower and upper bounds]', default=[None,None],
type = int, nargs = 2)
qcGroup.add_argument('--prop-n', help='Threshold ambiguous base proportion above which sequences will be excluded'
' [default = 0.1]', default = None,
type = float)
qcGroup.add_argument('--upper-n', help='Threshold ambiguous base count above which sequences will be excluded',
default=None, type = int)
# sequence querying
queryingGroup = parser.add_argument_group('Database querying options')
queryingGroup.add_argument('--model-dir', help='Directory containing model to use for assigning queries '
'to clusters [default = reference database directory]', type = str)
queryingGroup.add_argument('--previous-clustering', help='Directory containing previous cluster definitions '
'and network [default = use that in the directory '
'containing the model]', type = str)
queryingGroup.add_argument('--core', help='(with a \'refine\' model) '
'Use a core-distance only model for assigning queries '
'[default = False]', default=False, action='store_true')
queryingGroup.add_argument('--accessory', help='(with a \'refine\' or \'lineage\' model) '
'Use an accessory-distance only model for assigning queries '
'[default = False]', default=False, action='store_true')
# processing
other = parser.add_argument_group('Other options')
other.add_argument('--threads', default=1, type=int, help='Number of threads to use [default = 1]')
other.add_argument('--gpu-sketch', default=False, action='store_true', help='Use a GPU when calculating sketches (read data only) [default = False]')
other.add_argument('--gpu-dist', default=False, action='store_true', help='Use a GPU when calculating distances [default = False]')
other.add_argument('--gpu-graph', default=False, action='store_true', help='Use a GPU when constructing networks [default = False]')
other.add_argument('--deviceid', default=0, type=int, help='CUDA device ID, if using GPU [default = 0]')
other.add_argument('--version', action='version',
version='%(prog)s '+__version__)
other.add_argument('--citation',
action='store_true',
default=False,
help='Give a citation, and possible methods paragraph'
' based on the command line')
# combine
args = parser.parse_args()
    # ensure directories do not have a trailing forward slash
    for arg_name in ['db', 'model_dir', 'output', 'previous_clustering']:
        arg_value = getattr(args, arg_name)
        if arg_value is not None:
            setattr(args, arg_name, arg_value.rstrip('/'))
return args
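# Example invocation (illustrative paths; flags as defined above):
#
#   poppunk_assign --db listeria_db --query qfile.txt --output listeria_query \
#       --threads 4 --update-db
#
# where qfile.txt lists the query assemblies, typically one name and sequence path
# per line.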
def main():
"""Main function. Parses cmd line args and runs in the specified mode.
"""
#******************************#
#* *#
#* Check command options *#
#* *#
#******************************#
args = get_options()
# May just want to print the citation
if args.citation:
from .citation import print_citation
print_citation(args, assign=True)
sys.exit(0)
from .sketchlib import checkSketchlibLibrary
from .utils import setGtThreads
from .utils import setupDBFuncs
# Dict of QC options for passing to database construction and querying functions
if args.length_sigma is None and None in args.length_range and args.prop_n is None \
and args.upper_n is None and args.max_a_dist is None and args.max_pi_dist is None:
qc_dict = {'run_qc': False, 'type_isolate': None }
else:
# define defaults if one QC parameter given
# length_sigma
if args.length_sigma is not None:
length_sigma = args.length_sigma
elif None in args.length_range:
length_sigma = 5 # default used in __main__
else:
length_sigma = None
# prop_n
if args.prop_n is not None:
prop_n = args.prop_n
elif args.upper_n is None:
prop_n = 0.1 # default used in __main__
else:
prop_n = None
qc_dict = {
'run_qc': True,
'qc_filter': args.qc_filter,
'retain_failures': args.retain_failures,
'length_sigma': length_sigma,
'length_range': args.length_range,
'prop_n': prop_n,
'upper_n': args.upper_n,
'max_pi_dist': args.max_pi_dist,
'max_a_dist': args.max_a_dist,
'type_isolate': args.type_isolate
}
# Dict of DB access functions for assign_query (which is out of scope)
dbFuncs = setupDBFuncs(args, args.min_kmer_count, qc_dict)
# run according to mode
sys.stderr.write("PopPUNK: assign\n")
sys.stderr.write("\t(with backend: " + dbFuncs['backend'] + " v" + dbFuncs['backend_version'] + "\n")
sys.stderr.write('\t sketchlib: ' + checkSketchlibLibrary() + ')\n')
# Check on parallelisation of graph-tools
setGtThreads(args.threads)
if args.distances is None:
distances = args.db + "/" + os.path.basename(args.db) + ".dists"
else:
distances = args.distances
#*******************************#
#* *#
#* query assignment (function *#
#* at top) *#
#* *#
#*******************************#
assign_query(dbFuncs,
args.db,
args.query,
args.output,
qc_dict,
args.update_db,
args.write_references,
distances,
args.threads,
args.overwrite,
args.plot_fit,
args.graph_weights,
args.max_a_dist,
args.max_pi_dist,
args.type_isolate,
args.model_dir,
args.strand_preserved,
args.previous_clustering,
args.external_clustering,
args.core,
args.accessory,
args.gpu_sketch,
args.gpu_dist,
args.gpu_graph,
args.deviceid,
web=False,
json_sketch=None,
save_partial_query_graph=False)
sys.stderr.write("\nDone\n")
if __name__ == '__main__':
main()
sys.exit(0)
```
#### File: PopPUNK/PopPUNK/citation.py
```python
import os
from .__init__ import __version__
import pp_sketchlib
from .sketchlib import readDBParams, getSeqsInDb
citation = \
"""1. <NAME>, <NAME>, <NAME>, <NAME>, Lo SW, Weiser JN, <NAME>, <NAME>, Croucher NJ.
Fast and flexible bacterial genomic epidemiology with PopPUNK.
Genome Research 29:304-316 (2019).
doi:10.1101/gr.241455.118
2. <NAME> et al. Scikit-learn: Machine Learning in Python.
J. Mach. Learn. Res. 12, 2825–2830 (2011)
3. <NAME> et al.
SciPy 1.0: fundamental algorithms for scientific computing in Python.
Nat. Methods 17, 261–272 (2020)
4. <NAME>. et al.
Array programming with NumPy.
Nature 585, 357–362 (2020)
5. <NAME>.
The graph-tool python library. (2017)
doi:10.6084/m9.figshare.1164194"""
sketchlib_citation = \
"""6. Lees JA & Croucher NJ.
pp-sketchlib (2020).
doi:10.5281/zenodo.4531418
7. <NAME>.
BinDash, software for fast genome distance estimation on a typical personal laptop.
Bioinformatics 35:671–673 (2019).
doi:10.1093/bioinformatics/bty651
8. <NAME>., <NAME>., <NAME>. & <NAME>.
ntHash: recursive nucleotide hashing.
Bioinformatics 32:3492–3494 (2016).
doi:10.1093/bioinformatics/btw397"""
poppunk_methods = "We built a database of %(number_samples)s isolates using " + \
"pp-sketchlib version %(sketchlib_version)s (doi:%(sketchlib_doi)s) with " + \
"sketch version %(sketchlib_hash)s, k-mer lengths %(kmin)s-%(kmax)s, a " + \
"sketch size of %(sketch_size)s and %(seed_type)s seeds [6-8]. We assigned " + \
"isolates to %(cluster_type)s through variable-length-k-mer clustering (VLKC) with PopPUNK " + \
"version %(poppunk_version)s (doi:%(poppunk_doi)s) by fitting a %(model_mode)s " + \
"with %(model_options)s [1-5].\n"
assign_methods = "We queried a database of %(number_samples)s isolates using pp-sketchlib version" + \
" %(sketchlib_version)s (doi:%(sketchlib_doi)s) with sketch version %(sketchlib_hash)s, " + \
"k-mer lengths %(kmin)s-%(kmax)s, a sketch size of %(sketch_size)s and %(seed_type)s seeds [6-8]. " + \
"Sequences were classified using variable-length-k-mer clustering (VLKC) with PopPUNK " + \
"version %(poppunk_version)s (doi:%(poppunk_doi)s) [1-5].\n"
def print_citation(args, assign=False):
# Read values from DB
try:
if assign:
db_prefix = args.db
else:
db_prefix = args.ref_db
n_samples = str(len(getSeqsInDb(db_prefix + "/" + os.path.basename(db_prefix) + ".h5")))
kmers, sketch_size, codon_phased = readDBParams(db_prefix)
kmin = str(min(kmers))
kmax = str(max(kmers))
sketch_size = str(sketch_size * 64)
seed_phasing = "codon-phased" if codon_phased else "dense"
    except Exception:
n_samples = "X"
kmin = "X"
kmax = "X"
sketch_size = "X"
seed_phasing = "X"
try:
pp_sketchlib_version = pp_sketchlib.version
sketch_version = pp_sketchlib.version
except AttributeError:
pp_sketchlib_version = "X"
sketch_version = "X"
if assign:
print(assign_methods % {"number_samples" : n_samples,
"sketchlib_version" : pp_sketchlib_version,
"sketchlib_doi" : "10.5281/zenodo.4531418",
"sketchlib_hash" : sketch_version,
"kmin" : kmin,
"kmax" : kmax,
"sketch_size" : sketch_size,
"seed_type" : seed_phasing,
"poppunk_version" : __version__,
"poppunk_doi" : "10.1101/gr.241455.118"})
else:
if args.fit_model == "bgmm":
model_mode = "BGMM"
model_options = str(args.K) + " components"
cluster_type = "strains"
elif args.fit_model == "dbscan":
model_mode = "HDBSCAN"
            model_options = str(args.D) + " maximum clusters and a minimum of " + \
                str(round((int(n_samples) * (int(n_samples) + 1))/(2 * args.min_cluster_prop))) + \
                " points per cluster"
cluster_type = "strains"
elif args.fit_model == "refine":
model_mode = "refined"
model_options = "score " + str(args.score_idx) + " used to optimise the VLKCs"
cluster_type = "strains"
elif args.fit_model == "threshold":
model_mode = "simple threshold"
model_options = "a core-distance cutoff distance of " + args.threshold
cluster_type = "partitions"
elif args.fit_model == "lineage":
model_mode = "lineage"
model_options = "ranks of " + args.ranks
cluster_type = "lineages"
else:
model_mode = "UNKNOWN"
model_options = "UNKNOWN"
cluster_type = "UNKNOWN"
print(poppunk_methods % {"number_samples" : n_samples,
"sketchlib_version" : pp_sketchlib_version,
"sketchlib_doi" : "10.5281/zenodo.4531418",
"sketchlib_hash" : sketch_version,
"kmin" : kmin,
"kmax" : kmax,
"sketch_size" : sketch_size,
"seed_type" : seed_phasing,
"cluster_type": cluster_type,
"poppunk_version" : __version__,
"poppunk_doi" : "10.1101/gr.241455.118",
"model_mode" : model_mode,
"model_options" : model_options})
print(citation)
print(sketchlib_citation)
```
#### File: PopPUNK/PopPUNK/network.py
```python
import os
import sys
import re
# additional
import glob
import operator
import shutil
import subprocess
import numpy as np
import pandas as pd
from scipy.stats import rankdata
from tempfile import mkstemp, mkdtemp
from collections import defaultdict, Counter
from functools import partial
from multiprocessing import Pool
import pickle
import graph_tool.all as gt
import dendropy
import poppunk_refine
# Load GPU libraries
try:
import cupyx
import cugraph
import cudf
import cupy as cp
from numba import cuda
import rmm
gpu_lib = True
except ImportError as e:
gpu_lib = False
from .__main__ import accepted_weights_types
from .__main__ import betweenness_sample_default
from .sketchlib import addRandom
from .utils import iterDistRows
from .utils import listDistInts
from .utils import readIsolateTypeFromCsv
from .utils import readRfile
from .utils import setupDBFuncs
from .utils import isolateNameToLabel
from .utils import check_and_set_gpu
from .unwords import gen_unword
def fetchNetwork(network_dir, model, refList, ref_graph = False,
core_only = False, accessory_only = False, use_gpu = False):
"""Load the network based on input options
Returns the network as a graph-tool format graph, and sets
the slope parameter of the passed model object.
Args:
network_dir (str)
A network used to define clusters
model (ClusterFit)
A fitted model object
refList (list)
Names of references that should be in the network
ref_graph (bool)
Use ref only graph, if available
[default = False]
core_only (bool)
Return the network created using only core distances
[default = False]
accessory_only (bool)
Return the network created using only accessory distances
[default = False]
use_gpu (bool)
Use cugraph library to load graph
Returns:
genomeNetwork (graph)
The loaded network
cluster_file (str)
The CSV of cluster assignments corresponding to this network
"""
# If a refined fit, may use just core or accessory distances
dir_prefix = network_dir + "/" + os.path.basename(network_dir)
# load CUDA libraries - here exit without switching to CPU libraries
# to avoid loading an unexpected file
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
if use_gpu:
graph_suffix = '.csv.gz'
else:
graph_suffix = '.gt'
if core_only and model.type == 'refine':
if ref_graph:
network_file = dir_prefix + '_core.refs_graph' + graph_suffix
else:
network_file = dir_prefix + '_core_graph' + graph_suffix
cluster_file = dir_prefix + '_core_clusters.csv'
elif accessory_only and model.type == 'refine':
if ref_graph:
network_file = dir_prefix + '_accessory.refs_graph' + graph_suffix
else:
network_file = dir_prefix + '_accessory_graph' + graph_suffix
cluster_file = dir_prefix + '_accessory_clusters.csv'
else:
if ref_graph and os.path.isfile(dir_prefix + '.refs_graph' + graph_suffix):
network_file = dir_prefix + '.refs_graph' + graph_suffix
else:
network_file = dir_prefix + '_graph' + graph_suffix
cluster_file = dir_prefix + '_clusters.csv'
if core_only or accessory_only:
sys.stderr.write("Can only do --core or --accessory fits from "
"a refined fit. Using the combined distances.\n")
# Load network file
sys.stderr.write("Loading network from " + network_file + "\n")
genomeNetwork = load_network_file(network_file, use_gpu = use_gpu)
# Ensure all in dists are in final network
checkNetworkVertexCount(refList, genomeNetwork, use_gpu)
return genomeNetwork, cluster_file
def load_network_file(fn, use_gpu = False):
"""Load the network based on input options
Returns the network as a graph-tool format graph, and sets
the slope parameter of the passed model object.
Args:
fn (str)
Network file name
use_gpu (bool)
Use cugraph library to load graph
Returns:
genomeNetwork (graph)
The loaded network
"""
# Load the network from the specified file
if use_gpu:
G_df = cudf.read_csv(fn, compression = 'gzip')
if 'src' in G_df.columns:
G_df.rename(columns={'src': 'source','dst': 'destination'}, inplace=True)
genomeNetwork = cugraph.Graph()
if 'weights' in G_df.columns:
G_df = G_df[['source','destination','weights']]
genomeNetwork.from_cudf_edgelist(G_df, edge_attr='weights', renumber=False)
else:
genomeNetwork.from_cudf_edgelist(G_df,renumber=False)
sys.stderr.write("Network loaded: " + str(genomeNetwork.number_of_vertices()) + " samples\n")
else:
genomeNetwork = gt.load_graph(fn)
sys.stderr.write("Network loaded: " + str(len(list(genomeNetwork.vertices()))) + " samples\n")
return genomeNetwork
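# Example (illustrative prefix): load a graph-tool network written by save_network,
# or the gzipped edge list used in GPU mode.
#
#   G = load_network_file("strain_db/strain_db_graph.gt", use_gpu=False)
#   # G = load_network_file("strain_db/strain_db_graph.csv.gz", use_gpu=True)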
def checkNetworkVertexCount(seq_list, G, use_gpu):
"""Checks the number of network vertices matches the number
of sequence names.
Args:
seq_list (list)
The list of sequence names
G (graph)
The network of sequences
use_gpu (bool)
Whether to use cugraph for graph analyses
"""
vertex_list = set(get_vertex_list(G, use_gpu = use_gpu))
networkMissing = set(set(range(len(seq_list))).difference(vertex_list))
if len(networkMissing) > 0:
sys.stderr.write("ERROR: " + str(len(networkMissing)) + " samples are missing from the final network\n")
sys.exit(1)
def getCliqueRefs(G, reference_indices = set()):
"""Recursively prune a network of its cliques. Returns one vertex from
a clique at each stage
Args:
G (graph)
The graph to get clique representatives from
reference_indices (set)
The unique list of vertices being kept, to add to
"""
cliques = gt.max_cliques(G)
try:
# Get the first clique, and see if it has any members already
# contained in the vertex list
clique = frozenset(next(cliques))
if clique.isdisjoint(reference_indices):
reference_indices.add(list(clique)[0])
# Remove the clique, and prune the resulting subgraph (recursively)
subgraph = gt.GraphView(G, vfilt=[v not in clique for v in G.vertices()])
if subgraph.num_vertices() > 1:
getCliqueRefs(subgraph, reference_indices)
elif subgraph.num_vertices() == 1:
reference_indices.add(subgraph.get_vertices()[0])
except StopIteration:
pass
return reference_indices
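# Worked illustration (not from the package): for a graph with a triangle a-b-c and
# a pendant edge c-d, the first clique found might be {a, b, c}; one member (say a)
# is recorded, the whole clique is filtered out, and the recursion on the single
# remaining vertex d records d as well, so the component keeps references {a, d}.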
def cliquePrune(component, graph, reference_indices, components_list):
"""Wrapper function around :func:`~getCliqueRefs` so it can be
called by a multiprocessing pool
"""
if gt.openmp_enabled():
gt.openmp_set_num_threads(1)
subgraph = gt.GraphView(graph, vfilt=components_list == component)
refs = reference_indices.copy()
if subgraph.num_vertices() <= 2:
refs.add(subgraph.get_vertices()[0])
ref_list = refs
else:
ref_list = getCliqueRefs(subgraph, refs)
return(list(ref_list))
def translate_network_indices(G_ref_df, reference_indices):
"""Function for ensuring an updated reference network retains
numbering consistent with sample names
Args:
G_ref_df (cudf data frame)
List of edges in reference network
reference_indices (list)
The ordered list of reference indices in the original network
Returns:
G_ref (cugraph network)
Network of reference sequences
"""
# Translate network indices to match name order
G_ref_df['source'] = [reference_indices.index(x) for x in G_ref_df['old_source'].to_arrow().to_pylist()]
G_ref_df['destination'] = [reference_indices.index(x) for x in G_ref_df['old_destination'].to_arrow().to_pylist()]
G_ref = add_self_loop(G_ref_df, len(reference_indices) - 1, renumber = True)
return(G_ref)
def extractReferences(G, dbOrder, outPrefix, outSuffix = '', type_isolate = None,
existingRefs = None, threads = 1, use_gpu = False):
"""Extract references for each cluster based on cliques
Writes chosen references to file by calling :func:`~writeReferences`
Args:
G (graph)
A network used to define clusters
dbOrder (list)
The order of files in the sketches, so returned references are in the same order
outPrefix (str)
Prefix for output file
outSuffix (str)
Suffix for output file (.refs will be appended)
type_isolate (str)
Isolate to be included in set of references
existingRefs (list)
References that should be used for each clique
use_gpu (bool)
Use cugraph for graph analysis (default = False)
Returns:
refFileName (str)
The name of the file references were written to
references (list)
An updated list of the reference names
"""
    if existingRefs is None:
references = set()
reference_indices = set()
else:
references = set(existingRefs)
index_lookup = {v:k for k,v in enumerate(dbOrder)}
reference_indices = set([index_lookup[r] for r in references])
# Add type isolate, if necessary
type_isolate_index = None
if type_isolate is not None:
if type_isolate in dbOrder:
type_isolate_index = dbOrder.index(type_isolate)
else:
sys.stderr.write('Type isolate ' + type_isolate + ' not found\n')
sys.exit(1)
if use_gpu:
# For large network, use more approximate method for extracting references
reference = {}
# Record the original components to which sequences belonged
component_assignments = cugraph.components.connectivity.connected_components(G)
# Leiden method has resolution parameter - higher values give greater precision
partition_assignments, score = cugraph.leiden(G, resolution = 0.1)
# group by partition, which becomes the first column, so retrieve second column
reference_index_df = partition_assignments.groupby('partition').nth(0)
reference_indices = reference_index_df['vertex'].to_arrow().to_pylist()
# Add type isolate if necessary - before edges are added
if type_isolate_index is not None and type_isolate_index not in reference_indices:
reference_indices.append(type_isolate_index)
# Order found references as in sketchlib database
reference_names = [dbOrder[int(x)] for x in sorted(reference_indices)]
# Extract reference edges
G_df = G.view_edge_list()
if 'src' in G_df.columns:
G_df.rename(columns={'src': 'old_source','dst': 'old_destination'}, inplace=True)
else:
G_df.rename(columns={'source': 'old_source','destination': 'old_destination'}, inplace=True)
G_ref_df = G_df[G_df['old_source'].isin(reference_indices) & G_df['old_destination'].isin(reference_indices)]
# Translate network indices to match name order
G_ref = translate_network_indices(G_ref_df, reference_indices)
# Check references in same component in overall graph are connected in the reference graph
# First get components of original reference graph
reference_component_assignments = cugraph.components.connectivity.connected_components(G_ref)
reference_component_assignments.rename(columns={'labels': 'ref_labels'}, inplace=True)
# Merge with component assignments from overall graph
combined_vertex_assignments = reference_component_assignments.merge(component_assignments,
on = 'vertex',
how = 'left')
combined_vertex_assignments = combined_vertex_assignments[combined_vertex_assignments['vertex'].isin(reference_indices)]
# Find the number of components in the reference graph associated with each component in the overall graph -
# should be one if there is a one-to-one mapping of components - else links need to be added
max_ref_comp_count = combined_vertex_assignments.groupby(['labels'], sort = False)['ref_labels'].nunique().max()
if max_ref_comp_count > 1:
# Iterate through components
for component, component_df in combined_vertex_assignments.groupby(['labels'], sort = False):
# Find components in the overall graph matching multiple components in the reference graph
if component_df.groupby(['labels'], sort = False)['ref_labels'].nunique().iloc[0] > 1:
# Make a graph of the component from the overall graph
vertices_in_component = component_assignments[component_assignments['labels']==component]['vertex']
references_in_component = vertices_in_component[vertices_in_component.isin(reference_indices)].values
G_component_df = G_df[G_df['source'].isin(vertices_in_component) & G_df['destination'].isin(vertices_in_component)]
G_component = cugraph.Graph()
G_component.from_cudf_edgelist(G_component_df)
# Find single shortest path from a reference to all other nodes in the component
traversal = cugraph.traversal.sssp(G_component,source = references_in_component[0])
reference_index_set = set(reference_indices)
# Add predecessors to reference sequences on the SSSPs
predecessor_list = traversal[traversal['vertex'].isin(reference_indices)]['predecessor'].values
predecessors = set(predecessor_list[predecessor_list >= 0].flatten().tolist())
# Add predecessors to reference set and check whether this results in complete paths
# where complete paths are indicated by references' predecessors being within the set of
# references
while len(predecessors) > 0 and len(predecessors - reference_index_set) > 0:
reference_index_set = reference_index_set.union(predecessors)
# Re-check predecessors against the expanded reference set so multi-hop paths are completed
predecessor_list = traversal[traversal['vertex'].isin(reference_index_set)]['predecessor'].values
predecessors = set(predecessor_list[predecessor_list >= 0].flatten().tolist())
# Add expanded reference set to the overall list
reference_indices = list(reference_index_set)
# Create new reference graph
G_ref_df = G_df[G_df['old_source'].isin(reference_indices) & G_df['old_destination'].isin(reference_indices)]
G_ref = translate_network_indices(G_ref_df, reference_indices)
else:
# Each component is independent, so can be multithreaded
components = gt.label_components(G)[0].a
# Turn gt threading off and on again either side of the parallel loop
if gt.openmp_enabled():
gt.openmp_set_num_threads(1)
# Cliques are pruned, taking one reference from each, until none remain
sys.setrecursionlimit(5000)
with Pool(processes=threads) as pool:
ref_lists = pool.map(partial(cliquePrune,
graph=G,
reference_indices=reference_indices,
components_list=components),
set(components))
sys.setrecursionlimit(1000)
# Returns nested lists, which need to be flattened
reference_indices = set([entry for sublist in ref_lists for entry in sublist])
# Add type isolate if necessary - before edges are added
if type_isolate_index is not None and type_isolate_index not in reference_indices:
reference_indices.add(type_isolate_index)
if gt.openmp_enabled():
gt.openmp_set_num_threads(threads)
# Use a vertex filter to extract the subgraph of references
# as a graphview
reference_vertex = G.new_vertex_property('bool')
for n, vertex in enumerate(G.vertices()):
if n in reference_indices:
reference_vertex[vertex] = True
else:
reference_vertex[vertex] = False
G_ref = gt.GraphView(G, vfilt = reference_vertex)
G_ref = gt.Graph(G_ref, prune = True) # https://stackoverflow.com/questions/30839929/graph-tool-graphview-object
# Find any clusters which are represented by >1 references
# This creates a dictionary: cluster_id: set(ref_idx in cluster)
clusters_in_full_graph = printClusters(G, dbOrder, printCSV=False)
reference_clusters_in_full_graph = defaultdict(set)
for reference_index in reference_indices:
reference_clusters_in_full_graph[clusters_in_full_graph[dbOrder[reference_index]]].add(reference_index)
# Calculate the component membership within the reference graph
ref_order = [name for idx, name in enumerate(dbOrder) if idx in frozenset(reference_indices)]
clusters_in_reference_graph = printClusters(G_ref, ref_order, printCSV=False)
# Record the components/clusters the references are in the reference graph
# dict: name: ref_cluster
reference_clusters_in_reference_graph = {}
for reference_name in ref_order:
reference_clusters_in_reference_graph[reference_name] = clusters_in_reference_graph[reference_name]
# Check if multi-reference components have been split as a validation test
# First iterate through clusters
network_update_required = False
for cluster_id, ref_idxs in reference_clusters_in_full_graph.items():
# Identify multi-reference clusters by this length
if len(ref_idxs) > 1:
check = list(ref_idxs)
# check if these are still in the same component in the reference graph
for i in range(len(check)):
component_i = reference_clusters_in_reference_graph[dbOrder[check[i]]]
for j in range(i + 1, len(check)):
# Add intermediate nodes
component_j = reference_clusters_in_reference_graph[dbOrder[check[j]]]
if component_i != component_j:
network_update_required = True
vertex_list, edge_list = gt.shortest_path(G, check[i], check[j])
# update reference list
for vertex in vertex_list:
reference_vertex[vertex] = True
reference_indices.add(int(vertex))
# update reference graph if vertices have been added
if network_update_required:
G_ref = gt.GraphView(G, vfilt = reference_vertex)
G_ref = gt.Graph(G_ref, prune = True) # https://stackoverflow.com/questions/30839929/graph-tool-graphview-object
# Order found references as in sketch files
reference_names = [dbOrder[int(x)] for x in sorted(reference_indices)]
refFileName = writeReferences(reference_names, outPrefix, outSuffix = outSuffix)
return reference_indices, reference_names, refFileName, G_ref
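# Illustrative usage (editorial addition, not part of the original module): a minimal
# sketch of how extractReferences might be called after clustering. `G`, `names` and
# "output/example" are hypothetical: a fitted graph-tool network, the sketch database
# order, and an output prefix respectively.
#   ref_idx, ref_names, ref_file, G_ref = extractReferences(G, names, "output/example",
#                                                           type_isolate = None, threads = 4)
#   save_network(G_ref, prefix = "output/example", suffix = ".refs_graph")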
def writeReferences(refList, outPrefix, outSuffix = ""):
"""Writes chosen references to file
Args:
refList (list)
Reference names to write
outPrefix (str)
Prefix for output file
outSuffix (str)
Suffix for output file (.refs will be appended)
Returns:
refFileName (str)
The name of the file references were written to
"""
# write references to file
refFileName = outPrefix + "/" + os.path.basename(outPrefix) + outSuffix + ".refs"
with open(refFileName, 'w') as rFile:
for ref in refList:
rFile.write(ref + '\n')
return refFileName
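# Illustrative usage (editorial addition, not part of the original module), assuming
# `ref_names` is a list of sample names and "output/example" is a hypothetical prefix:
#   ref_file = writeReferences(ref_names, "output/example")
#   # -> writes one name per line to output/example/example.refs and returns that path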
def network_to_edges(prev_G_fn, rlist, adding_qq_dists = False,
old_ids = None, previous_pkl = None, weights = False,
use_gpu = False):
"""Load previous network, extract the edges to match the
vertex order specified in rlist, and also return weights if specified.
Args:
prev_G_fn (str or graph object)
Path of file containing existing network, or already-loaded
graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
rlist (list)
List of reference sequence labels in new network
old_ids (list)
List of IDs of vertices in existing network
previous_pkl (str)
Path of pkl file containing names of sequences in
previous network
weights (bool)
Whether to return edge weights
(default = False)
use_gpu (bool)
Whether to use cugraph for graph analyses
Returns:
source_ids (list)
Source nodes for each edge
target_ids (list)
Target nodes for each edge
edge_weights (list)
Weights for each new edge
"""
# Load graph from file if passed string; else use graph object passed in
# as argument
if isinstance(prev_G_fn, str):
prev_G = load_network_file(prev_G_fn, use_gpu = use_gpu)
else:
prev_G = prev_G_fn
# load list of names in previous network if pkl name supplied
if previous_pkl is not None:
with open(previous_pkl, 'rb') as pickle_file:
old_rlist, old_qlist, self = pickle.load(pickle_file)
if self:
old_ids = old_rlist
else:
old_ids = old_rlist + old_qlist
elif old_ids is None:
sys.stderr.write('Missing .pkl file containing names of sequences in '
'previous network\n')
sys.exit(1)
# Get edges as lists of source,destination,weight using original IDs
if use_gpu:
G_df = prev_G.view_edge_list()
if weights:
if len(G_df.columns) < 3:
sys.stderr.write('Loaded network does not have edge weights; try a different '
'network or turn off graph weights\n')
exit(1)
if 'src' in G_df.columns:
G_df.rename(columns={'source': 'src','destination': 'dst'}, inplace=True)
edge_weights = G_df['weights'].to_arrow().to_pylist()
G_df.rename(columns={'src': 'source','dst': 'destination'}, inplace=True)
old_source_ids = G_df['source'].astype('int32').to_arrow().to_pylist()
old_target_ids = G_df['destination'].astype('int32').to_arrow().to_pylist()
else:
# get the source and target nodes
old_source_ids = gt.edge_endpoint_property(prev_G, prev_G.vertex_index, "source")
old_target_ids = gt.edge_endpoint_property(prev_G, prev_G.vertex_index, "target")
# get the weights
if weights:
if prev_G.edge_properties.keys() is None or 'weight' not in prev_G.edge_properties.keys():
sys.stderr.write('Loaded network does not have edge weights; try a different '
'network or turn off graph weights\n')
exit(1)
edge_weights = list(prev_G.ep['weight'])
# If appending queries to an existing network, then the recovered links can be left
# unchanged, as the new IDs are the queries, and the existing sequences will not be found
# in the list of IDs
if adding_qq_dists:
source_ids = old_source_ids
target_ids = old_target_ids
else:
# Update IDs to new versions
old_id_indices = [rlist.index(x) for x in old_ids]
# translate to indices
source_ids = [old_id_indices[x] for x in old_source_ids]
target_ids = [old_id_indices[x] for x in old_target_ids]
# return values
if weights:
return source_ids, target_ids, edge_weights
else:
return source_ids, target_ids
def print_network_summary(G, betweenness_sample = betweenness_sample_default, use_gpu = False):
"""Wrapper function for printing network information
Args:
G (graph)
The network to summarise
betweenness_sample (int)
Number of sequences per component used to estimate betweenness using
a GPU. Smaller numbers are faster but less precise [default = 100]
use_gpu (bool)
Whether to use GPUs for network construction
"""
# print some summaries
(metrics, scores) = networkSummary(G, betweenness_sample = betweenness_sample, use_gpu = use_gpu)
sys.stderr.write("Network summary:\n" + "\n".join(["\tComponents\t\t\t\t" + str(metrics[0]),
"\tDensity\t\t\t\t\t" + "{:.4f}".format(metrics[1]),
"\tTransitivity\t\t\t\t" + "{:.4f}".format(metrics[2]),
"\tMean betweenness\t\t\t" + "{:.4f}".format(metrics[3]),
"\tWeighted-mean betweenness\t\t" + "{:.4f}".format(metrics[4]),
"\tScore\t\t\t\t\t" + "{:.4f}".format(scores[0]),
"\tScore (w/ betweenness)\t\t\t" + "{:.4f}".format(scores[1]),
"\tScore (w/ weighted-betweenness)\t\t" + "{:.4f}".format(scores[2])])
+ "\n")
def initial_graph_properties(rlist, qlist):
"""Initial processing of sequence names for
network construction.
Args:
rlist (list)
List of reference sequence labels
qlist (list)
List of query sequence labels
Returns:
vertex_labels (list)
Ordered list of sequences in network
self_comparison (bool)
Whether the network is being constructed from all-v-all distances or
reference-v-query information
"""
if rlist == qlist:
self_comparison = True
vertex_labels = rlist
else:
self_comparison = False
vertex_labels = rlist + qlist
return vertex_labels, self_comparison
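# Illustrative behaviour (editorial addition, not part of the original module): identical
# lists are treated as an all-vs-all comparison, otherwise queries are appended after
# the references.
#   initial_graph_properties(['s1', 's2'], ['s1', 's2'])   # -> (['s1', 's2'], True)
#   initial_graph_properties(['s1', 's2'], ['q1'])         # -> (['s1', 's2', 'q1'], False)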
def process_weights(distMat, weights_type):
"""Calculate edge weights from the distance matrix
Args:
distMat (2 column ndarray)
Numpy array of pairwise distances
weights_type (str)
Measure to calculate from the distMat to use as edge weights in network
- options are core, accessory or euclidean distance
Returns:
processed_weights (list)
Edge weights
"""
processed_weights = []
if weights_type is not None and distMat is not None:
# Check weights type is valid
if weights_type not in accepted_weights_types:
sys.stderr.write("Unable to calculate distance type " + str(weights_type) + "; "
"accepted types are " + str(accepted_weights_types) + "\n")
sys.exit(1)
if weights_type == 'euclidean':
processed_weights = np.linalg.norm(distMat, axis = 1).tolist()
elif weights_type == 'core':
processed_weights = distMat[:, 0].tolist()
elif weights_type == 'accessory':
processed_weights = distMat[:, 1].tolist()
else:
sys.stderr.write('Require distance matrix to calculate distances\n')
return processed_weights
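# Illustrative example (editorial addition, not part of the original module), assuming a
# two-column core/accessory distance matrix:
#   distMat = np.array([[0.01, 0.10], [0.02, 0.20]])
#   process_weights(distMat, 'core')       # -> [0.01, 0.02]
#   process_weights(distMat, 'euclidean')  # -> row-wise norms, approximately [0.1005, 0.2010]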
def process_previous_network(previous_network = None, adding_qq_dists = False, old_ids = None,
previous_pkl = None, vertex_labels = None, weights = False, use_gpu = False):
"""Extract edge types from an existing network
Args:
previous_network (str or graph object)
Name of file containing a previous network to be integrated into this new
network, or already-loaded graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
old_ids (list)
Ordered list of vertex names in previous network
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
ordered based on the original network construction
vertex_labels (list)
Ordered list of sequence labels
weights (bool)
Whether weights should be extracted from the previous network
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
extra_sources (list)
List of source node identifiers
extra_targets (list)
List of destination node identifiers
extra_weights (list or None)
List of edge weights
"""
if previous_pkl is not None or old_ids is not None:
if weights:
# Extract from network
extra_sources, extra_targets, extra_weights = network_to_edges(previous_network,
vertex_labels,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
weights = True,
use_gpu = use_gpu)
else:
# Extract from network
extra_sources, extra_targets = network_to_edges(previous_network,
vertex_labels,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
weights = False,
use_gpu = use_gpu)
extra_weights = None
else:
sys.stderr.write('A distance pkl or ordered list of vertex names corresponding to the previous network is required for loading\n')
sys.exit(1)
return extra_sources, extra_targets, extra_weights
def construct_network_from_edge_list(rlist,
qlist,
edge_list,
weights = None,
distMat = None,
previous_network = None,
adding_qq_dists = False,
old_ids = None,
previous_pkl = None,
betweenness_sample = betweenness_sample_default,
summarise = True,
use_gpu = False):
"""Construct an undirected network using a data frame of edges. Nodes are samples and
edges where samples are within the same cluster
Will print summary statistics about the network to ``STDERR``
Args:
rlist (list)
List of reference sequence labels
qlist (list)
List of query sequence labels
edge_list (list of tuples)
List of (source, destination) index tuples defining the network edges
weights (list)
List of edge weights
distMat (2 column ndarray)
Numpy array of pairwise distances
previous_network (str or graph object)
Name of file containing a previous network to be integrated into this new
network, or the already-loaded graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
old_ids (list)
Ordered list of vertex names in previous network
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
betweenness_sample (int)
Number of sequences per component used to estimate betweenness using
a GPU. Smaller numbers are faster but less precise [default = 100]
summarise (bool)
Whether to calculate and print network summaries with :func:`~networkSummary`
(default = True)
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
G (graph)
The resulting network
"""
# Check GPU library use
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
# data structures
vertex_labels, self_comparison = initial_graph_properties(rlist, qlist)
# Create new network
if use_gpu:
# benchmarking concurs with https://stackoverflow.com/questions/55922162/recommended-cudf-dataframe-construction
if len(edge_list) > 1:
edge_array = cp.array(edge_list, dtype = np.int32)
edge_gpu_matrix = cuda.to_device(edge_array)
G_df = cudf.DataFrame(edge_gpu_matrix, columns = ['source','destination'])
else:
# Cannot generate an array when one edge
G_df = cudf.DataFrame(columns = ['source','destination'])
G_df['source'] = [edge_list[0][0]]
G_df['destination'] = [edge_list[0][1]]
if weights is not None:
G_df['weights'] = weights
G = construct_network_from_df(rlist, qlist, G_df,
weights = (weights is not None),
distMat = distMat,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_network = previous_network,
previous_pkl = previous_pkl,
summarise = False,
use_gpu = use_gpu)
else:
# Load previous network
if previous_network is not None:
extra_sources, extra_targets, extra_weights = \
process_previous_network(previous_network = previous_network,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
vertex_labels = vertex_labels,
weights = (weights is not None),
use_gpu = use_gpu)
# Construct list of tuples for graph-tool
# Include information from previous graph if supplied
if weights is not None:
weighted_edges = []
for ((src, dest), weight) in zip(edge_list, weights):
weighted_edges.append((src, dest, weight))
if previous_network is not None:
for (src, dest, weight) in zip(extra_sources, extra_targets, extra_weights):
weighted_edges.append((src, dest, weight))
edge_list = weighted_edges
else:
if previous_network is not None:
for (src, dest) in zip(extra_sources, extra_targets):
edge_list.append((src, dest))
# build the graph
G = gt.Graph(directed = False)
G.add_vertex(len(vertex_labels))
if weights is not None:
eweight = G.new_ep("float")
G.add_edge_list(edge_list, eprops = [eweight])
G.edge_properties["weight"] = eweight
else:
G.add_edge_list(edge_list)
if summarise:
print_network_summary(G, betweenness_sample = betweenness_sample, use_gpu = use_gpu)
return G
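# Illustrative usage (editorial addition, not part of the original module): building a
# small CPU network from hypothetical sample names and edge tuples.
#   names = ['s1', 's2', 's3']
#   edges = [(0, 1), (1, 2)]
#   G = construct_network_from_edge_list(names, names, edges, summarise = False)
#   # With weights, pass a list the same length as `edges`, e.g. weights = [0.01, 0.02]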
def construct_network_from_df(rlist,
qlist,
G_df,
weights = False,
distMat = None,
previous_network = None,
adding_qq_dists = False,
old_ids = None,
previous_pkl = None,
betweenness_sample = betweenness_sample_default,
summarise = True,
use_gpu = False):
"""Construct an undirected network using a data frame of edges. Nodes are samples and
edges where samples are within the same cluster
Will print summary statistics about the network to ``STDERR``
Args:
rlist (list)
List of reference sequence labels
qlist (list)
List of query sequence labels
G_df (cudf or pandas data frame)
Data frame in which the first two columns are the nodes linked by edges
weights (bool)
Whether weights in the G_df data frame should be included in the network
distMat (2 column ndarray)
Numpy array of pairwise distances
previous_network (str or graph object)
Name of file containing a previous network to be integrated into this new
network, or the already-loaded graph object
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
old_ids (list)
Ordered list of vertex names in previous network
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
betweenness_sample (int)
Number of sequences per component used to estimate betweenness using
a GPU. Smaller numbers are faster but less precise [default = 100]
summarise (bool)
Whether to calculate and print network summaries with :func:`~networkSummary`
(default = True)
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
G (graph)
The resulting network
"""
# Check GPU library use
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
# data structures
vertex_labels, self_comparison = initial_graph_properties(rlist, qlist)
# Check df format is correct
if weights:
G_df.columns = ['source','destination','weights']
else:
G_df.columns = ['source','destination']
# Load previous network
if previous_network is not None:
extra_sources, extra_targets, extra_weights = process_previous_network(previous_network = previous_network,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
vertex_labels = vertex_labels,
weights = weights,
use_gpu = use_gpu)
if use_gpu:
G_extra_df = cudf.DataFrame()
else:
G_extra_df = pd.DataFrame()
G_extra_df['source'] = extra_sources
G_extra_df['destination'] = extra_targets
if extra_weights is not None:
G_extra_df['weights'] = extra_weights
if use_gpu:
G_df = cudf.concat([G_df,G_extra_df], ignore_index = True)
else:
G_df = pd.concat([G_df,G_extra_df], ignore_index = True)
if use_gpu:
# direct conversion
# ensure the highest-integer node is included in the edge list
# by adding a self-loop if necessary; see https://github.com/rapidsai/cugraph/issues/1206
max_in_vertex_labels = len(vertex_labels)-1
use_weights = False
if weights:
use_weights = True
G = add_self_loop(G_df, max_in_vertex_labels, weights = use_weights, renumber = False)
else:
# Convert bool to list of weights or None
if weights:
weights = G_df['weights']
else:
weights = None
# Convert data frame to list of tuples
connections = list(zip(*[G_df[c].values.tolist() for c in G_df[['source','destination']]]))
G = construct_network_from_edge_list(rlist, qlist, connections,
weights = weights,
distMat = distMat,
previous_network = previous_network,
old_ids = old_ids,
previous_pkl = previous_pkl,
summarise = False,
use_gpu = use_gpu)
if summarise:
print_network_summary(G, betweenness_sample = betweenness_sample, use_gpu = use_gpu)
return G
def construct_network_from_sparse_matrix(rlist,
qlist,
sparse_input,
weights = None,
previous_network = None,
previous_pkl = None,
betweenness_sample = betweenness_sample_default,
summarise = True,
use_gpu = False):
"""Construct an undirected network using a sparse matrix. Nodes are samples and
edges where samples are within the same cluster
Will print summary statistics about the network to ``STDERR``
Args:
rlist (list)
List of reference sequence labels
qlist (list)
List of query sequence labels
sparse_input (numpy.array)
Sparse distance matrix from lineage fit
weights (list)
List of weights for each edge in the network
previous_network (str)
Name of file containing a previous network to be integrated into this new
network
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
betweenness_sample (int)
Number of sequences per component used to estimate betweenness using
a GPU. Smaller numbers are faster but less precise [default = 100]
summarise (bool)
Whether to calculate and print network summaries with :func:`~networkSummary`
(default = True)
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
G (graph)
The resulting network
"""
# Check GPU library use
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
if use_gpu:
G_df = cudf.DataFrame()
else:
G_df = pd.DataFrame()
G_df['source'] = sparse_input.row
G_df['destination'] = sparse_input.col
G_df['weights'] = sparse_input.data
G = construct_network_from_df(rlist, qlist, G_df,
weights = True,
previous_network = previous_network,
previous_pkl = previous_pkl,
betweenness_sample = betweenness_sample,
summarise = False,
use_gpu = use_gpu)
if summarise:
print_network_summary(G, betweenness_sample = betweenness_sample, use_gpu = use_gpu)
return G
def construct_dense_weighted_network(rlist, distMat, weights_type = None, use_gpu = False):
"""Construct an undirected network using sequence lists, assignments of pairwise distances
to clusters, and the identifier of the cluster assigned to within-strain distances.
Nodes are samples and edges where samples are within the same cluster
Will print summary statistics about the network to ``STDERR``
Args:
rlist (list)
List of reference sequence labels
distMat (2 column ndarray)
Numpy array of pairwise distances
weights_type (str)
Type of weight to use for network
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
G (graph)
The resulting network
"""
# Check GPU library use
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
# data structures
vertex_labels, self_comparison = initial_graph_properties(rlist, rlist)
# Check that the distances needed for edge weights are available
if weights_type is None or distMat is None:
sys.stderr.write("Need weights to construct weighted network\n")
sys.exit(1)
# Process weights
weights = process_weights(distMat, weights_type)
# Convert edge indices to tuples
edge_list = poppunk_refine.generateAllTuples(num_ref = len(rlist),
self = True,
int_offset = 0)
if use_gpu:
# Construct network with GPU via data frame
# Build the data frame from the full edge list (all pairwise edges), mirroring the
# construction used in construct_network_from_edge_list
edge_array = cp.array(edge_list, dtype = np.int32)
edge_gpu_matrix = cuda.to_device(edge_array)
G_df = cudf.DataFrame(edge_gpu_matrix, columns = ['source','destination'])
G_df['weights'] = weights
max_in_vertex_labels = len(vertex_labels)-1
G = add_self_loop(G_df, max_in_vertex_labels, weights = True, renumber = False)
else:
# Construct network with CPU via edge list
weighted_edges = []
for ((src, dest), weight) in zip(edge_list, weights):
weighted_edges.append((src, dest, weight))
# build the graph
G = gt.Graph(directed = False)
G.add_vertex(len(vertex_labels))
eweight = G.new_ep("float")
# Could alternatively assign weights through eweight.a = weights
G.add_edge_list(weighted_edges, eprops = [eweight])
G.edge_properties["weight"] = eweight
return G
def construct_network_from_assignments(rlist, qlist, assignments, within_label = 1, int_offset = 0,
weights = None, distMat = None, weights_type = None, previous_network = None, old_ids = None,
adding_qq_dists = False, previous_pkl = None, betweenness_sample = betweenness_sample_default,
summarise = True, use_gpu = False):
"""Construct an undirected network using sequence lists, assignments of pairwise distances
to clusters, and the identifier of the cluster assigned to within-strain distances.
Nodes are samples and edges where samples are within the same cluster
Will print summary statistics about the network to ``STDERR``
Args:
rlist (list)
List of reference sequence labels
qlist (list)
List of query sequence labels
assignments (numpy.array or int)
Labels of most likely cluster assignment
within_label (int)
The label for the cluster representing within-strain distances
int_offset (int)
Constant integer to add to each node index
weights (list)
List of weights for each edge in the network
distMat (2 column ndarray)
Numpy array of pairwise distances
weights_type (str)
Measure to calculate from the distMat to use as edge weights in network
- options are core, accessory or euclidean distance
previous_network (str)
Name of file containing a previous network to be integrated into this new
network
old_ids (list)
Ordered list of vertex names in previous network
adding_qq_dists (bool)
Boolean specifying whether query-query edges are being added
to an existing network, such that not all the sequence IDs will
be found in the old IDs, which should already be correctly ordered
previous_pkl (str)
Name of file containing the names of the sequences in the previous_network
betweenness_sample (int)
Number of sequences per component used to estimate betweenness using
a GPU. Smaller numbers are faster but less precise [default = 100]
summarise (bool)
Whether to calculate and print network summaries with :func:`~networkSummary`
(default = True)
use_gpu (bool)
Whether to use GPUs for network construction
Returns:
G (graph)
The resulting network
"""
# Check GPU library use
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
# Filter weights to only the relevant edges
if weights is not None:
weights = weights[assignments == within_label]
elif distMat is not None and weights_type is not None:
if isinstance(assignments, list):
assignments = np.array(assignments)
distMat = distMat[assignments == within_label,:]
weights = process_weights(distMat, weights_type)
# Convert edge indices to tuples
connections = poppunk_refine.generateTuples(assignments,
within_label,
self = (rlist == qlist),
num_ref = len(rlist),
int_offset = int_offset)
# Construct network using edge list
G = construct_network_from_edge_list(rlist, qlist, connections,
weights = weights,
distMat = distMat,
previous_network = previous_network,
adding_qq_dists = adding_qq_dists,
old_ids = old_ids,
previous_pkl = previous_pkl,
summarise = False,
use_gpu = use_gpu)
if summarise:
print_network_summary(G, betweenness_sample = betweenness_sample, use_gpu = use_gpu)
return G
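# Illustrative usage (editorial addition, not part of the original module), assuming
# `rlist` is a list of sample names and `assignments` holds per-pair cluster labels from
# a fitted model, with within-strain pairs labelled 1:
#   G = construct_network_from_assignments(rlist, rlist, assignments,
#                                          within_label = 1, summarise = False)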
def get_cugraph_triangles(G):
"""Counts the number of triangles in a cugraph
network. Can be removed when the cugraph issue
https://github.com/rapidsai/cugraph/issues/1043 is fixed.
Args:
G (cugraph network)
Network to be analysed
Returns:
triangle_count (int)
Count of triangles in graph
"""
nlen = G.number_of_vertices()
df = G.view_edge_list()
A = cp.full((nlen, nlen), 0, dtype = cp.int32)
A[df.src.values, df.dst.values] = 1
A = cp.maximum( A, A.transpose() )
triangle_count = int(cp.around(cp.trace(cp.matmul(A, cp.matmul(A, A)))/6,0))
return triangle_count
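# Note on the adjacency-matrix approach above (editorial comment): trace(A^3) counts
# closed walks of length three, and each triangle is counted six times (three starting
# vertices x two directions), hence the division by six. A CPU-only sketch of the same
# idea with numpy, for a hypothetical symmetric 0/1 adjacency matrix A:
#   triangles = int(round(np.trace(A @ A @ A) / 6))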
def networkSummary(G, calc_betweenness=True, betweenness_sample = betweenness_sample_default,
use_gpu = False):
"""Provides summary values about the network
Args:
G (graph)
The network of strains
calc_betweenness (bool)
Whether to calculate betweenness stats
use_gpu (bool)
Whether to use cugraph for graph analysis
Returns:
metrics (list)
List with # components, density, transitivity, mean betweenness
and weighted mean betweenness
scores (list)
List of scores
"""
if use_gpu:
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
component_assignments = cugraph.components.connectivity.connected_components(G)
component_nums = component_assignments['labels'].unique().astype(int)
components = len(component_nums)
density = G.number_of_edges()/(0.5 * G.number_of_vertices() * (G.number_of_vertices() - 1))
# consistent with graph-tool for small graphs - triangle counts differ for large graphs
# could reflect issue https://github.com/rapidsai/cugraph/issues/1043
# this command can be restored once the above issue is fixed - scheduled for cugraph 0.20
# triangle_count = cugraph.community.triangle_count.triangles(G)/3
triangle_count = 3*get_cugraph_triangles(G)
degree_df = G.in_degree()
# consistent with graph-tool
triad_count = 0.5 * sum([d * (d - 1) for d in degree_df[degree_df['degree'] > 1]['degree'].to_pandas()])
if triad_count > 0:
transitivity = triangle_count/triad_count
else:
transitivity = 0.0
else:
component_assignments, component_frequencies = gt.label_components(G)
components = len(component_frequencies)
density = len(list(G.edges()))/(0.5 * len(list(G.vertices())) * (len(list(G.vertices())) - 1))
transitivity = gt.global_clustering(G)[0]
mean_bt = 0
weighted_mean_bt = 0
if calc_betweenness:
betweenness = []
sizes = []
if use_gpu:
component_frequencies = component_assignments['labels'].value_counts(sort = True, ascending = False)
for component in component_nums.to_pandas():
size = component_frequencies[component_frequencies.index == component].iloc[0].astype(int)
if size > 3:
component_vertices = component_assignments['vertex'][component_assignments['labels']==component]
subgraph = cugraph.subgraph(G, component_vertices)
if len(component_vertices) >= betweenness_sample:
component_betweenness = cugraph.betweenness_centrality(subgraph,
k = betweenness_sample,
normalized = True)
else:
component_betweenness = cugraph.betweenness_centrality(subgraph,
normalized = True)
betweenness.append(component_betweenness['betweenness_centrality'].max())
sizes.append(size)
else:
for component, size in enumerate(component_frequencies):
if size > 3:
vfilt = component_assignments.a == component
subgraph = gt.GraphView(G, vfilt=vfilt)
betweenness.append(max(gt.betweenness(subgraph, norm = True)[0].a))
sizes.append(size)
if len(betweenness) > 1:
mean_bt = np.mean(betweenness)
weighted_mean_bt = np.average(betweenness, weights=sizes)
elif len(betweenness) == 1:
mean_bt = betweenness[0]
weighted_mean_bt = betweenness[0]
# Calculate scores
metrics = [components, density, transitivity, mean_bt, weighted_mean_bt]
base_score = transitivity * (1 - density)
scores = [base_score, base_score * (1 - metrics[3]), base_score * (1 - metrics[4])]
return(metrics, scores)
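# Illustrative usage (editorial addition, not part of the original module), assuming `G`
# is a network built by one of the construct_network_* functions:
#   (metrics, scores) = networkSummary(G)
#   components, density, transitivity, mean_bt, weighted_mean_bt = metrics
#   # scores[0] is transitivity * (1 - density); scores[1] and scores[2] additionally
#   # penalise high (weighted) mean betweenness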
def addQueryToNetwork(dbFuncs, rList, qList, G, kmers,
assignments, model, queryDB, distances = None, distance_type = 'euclidean',
queryQuery = False, strand_preserved = False, weights = None, threads = 1,
use_gpu = False):
"""Finds edges between queries and items in the reference database,
and modifies the network to include them.
Args:
dbFuncs (list)
List of backend functions from :func:`~PopPUNK.utils.setupDBFuncs`
rList (list)
List of reference names
qList (list)
List of query names
G (graph)
Network to add to (mutated)
kmers (list)
List of k-mer sizes
assignments (numpy.array)
Cluster assignment of items in qlist
model (ClusterModel)
Model fitted to reference database
queryDB (str)
Query database location
distances (str)
Prefix of distance files for extending network
distance_type (str)
Distance type to use as weights in network
queryQuery (bool)
Add in all query-query distances
(default = False)
strand_preserved (bool)
Whether to treat strand as known (i.e. ignore rc k-mers)
when adding random distances. Only used if queryQuery = True
[default = False]
weights (numpy.array)
If passed, the core,accessory distances for each assignment, which will
be annotated as an edge attribute
threads (int)
Number of threads to use if new db created
(default = 1)
use_gpu (bool)
Whether to use cugraph for analysis
(default = False)
Returns:
distMat (numpy.array)
Query-query distances
"""
# initialise functions
queryDatabase = dbFuncs['queryDatabase']
# do not calculate weights unless specified
if weights is None:
distance_type = None
# initialise links data structure
new_edges = []
assigned = set()
# These are returned
qqDistMat = None
# store links for each query in a list of edge tuples
ref_count = len(rList)
# Add queries to network
G = construct_network_from_assignments(rList,
qList,
assignments,
within_label = model.within_label,
previous_network = G,
old_ids = rList,
distMat = weights,
weights_type = distance_type,
summarise = False,
use_gpu = use_gpu)
# Calculate all query-query distances too, if updating database
if queryQuery:
if len(qList) == 1:
qqDistMat = np.zeros((0, 2), dtype=np.float32)
else:
sys.stderr.write("Calculating all query-query distances\n")
addRandom(queryDB, qList, kmers, strand_preserved, threads = threads)
qqDistMat = queryDatabase(rNames = qList,
qNames = qList,
dbPrefix = queryDB,
queryPrefix = queryDB,
klist = kmers,
self = True,
number_plot_fits = 0,
threads = threads)
if distance_type == 'core':
queryAssignation = model.assign(qqDistMat, slope = 0)
elif distance_type == 'accessory':
queryAssignation = model.assign(qqDistMat, slope = 1)
else:
queryAssignation = model.assign(qqDistMat)
# Add queries to network
G = construct_network_from_assignments(qList,
qList,
queryAssignation,
int_offset = ref_count,
within_label = model.within_label,
previous_network = G,
old_ids = rList,
adding_qq_dists = True,
distMat = qqDistMat,
weights_type = distance_type,
summarise = False,
use_gpu = use_gpu)
# Otherwise only calculate query-query distances for new clusters
else:
# identify potentially new lineages in list: unassigned is a list of queries with no hits
unassigned = set(qList).difference(assigned)
query_indices = {k:v+ref_count for v,k in enumerate(qList)}
# process unassigned query sequences, if there are any
if len(unassigned) > 1:
sys.stderr.write("Found novel query clusters. Calculating distances between them.\n")
# use database construction methods to find links between unassigned queries
addRandom(queryDB, qList, kmers, strand_preserved, threads = threads)
qqDistMat = queryDatabase(rNames = list(unassigned),
qNames = list(unassigned),
dbPrefix = queryDB,
queryPrefix = queryDB,
klist = kmers,
self = True,
number_plot_fits = 0,
threads = threads)
if distance_type == 'core':
queryAssignation = model.assign(qqDistMat, slope = 0)
elif distance_type == 'accessory':
queryAssignation = model.assign(qqDistMat, slope = 1)
else:
queryAssignation = model.assign(qqDistMat)
# identify any links between queries and store in the same links dict
# links dict now contains lists of links both to original database and new queries
# have to use names and link to query list in order to match to node indices
for row_idx, (assignment, (query1, query2)) in enumerate(zip(queryAssignation, iterDistRows(qList, qList, self = True))):
if assignment == model.within_label:
if weights is not None:
if distance_type == 'core':
dist = weights[row_idx, 0]
elif distance_type == 'accessory':
dist = weights[row_idx, 1]
else:
dist = np.linalg.norm(weights[row_idx, :])
edge_tuple = (query_indices[query1], query_indices[query2], dist)
else:
edge_tuple = (query_indices[query1], query_indices[query2])
new_edges.append(edge_tuple)
G = construct_network_from_assignments(qList,
qList,
queryAssignation,
int_offset = ref_count,
within_label = model.within_label,
previous_network = G,
old_ids = rList + qList,
adding_qq_dists = True,
distMat = qqDistMat,
weights_type = distance_type,
summarise = False,
use_gpu = use_gpu)
return G, qqDistMat
def add_self_loop(G_df, seq_num, weights = False, renumber = True):
"""Adds self-loop to cugraph graph to ensure all nodes are included in
the graph, even if singletons.
Args:
G_df (cudf)
cudf data frame containing edge list
seq_num (int)
The maximum vertex index expected in the graph
weights (bool)
Whether the data frame contains edge weights
renumber (bool)
Whether to renumber the vertices when added to the graph
Returns:
G_new (graph)
Graph with self-loops added so that all vertices up to seq_num are included
"""
# use self-loop to ensure all nodes are present
min_in_df = np.amin([G_df['source'].min(), G_df['destination'].min()])
if min_in_df.item() > 0:
G_self_loop = cudf.DataFrame()
G_self_loop['source'] = [0]
G_self_loop['destination'] = [0]
if weights:
G_self_loop['weights'] = 0.0
G_df = cudf.concat([G_df,G_self_loop], ignore_index = True)
max_in_df = np.amax([G_df['source'].max(),G_df['destination'].max()])
if max_in_df.item() != seq_num:
G_self_loop = cudf.DataFrame()
G_self_loop['source'] = [seq_num]
G_self_loop['destination'] = [seq_num]
if weights:
G_self_loop['weights'] = 0.0
G_df = cudf.concat([G_df,G_self_loop], ignore_index = True)
# Construct graph
G_new = cugraph.Graph()
if weights:
G_new.from_cudf_edgelist(G_df, edge_attr = 'weights', renumber = renumber)
else:
G_new.from_cudf_edgelist(G_df, renumber = renumber)
return G_new
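# Illustrative usage (editorial addition, not part of the original module), assuming a
# cudf edge list for four samples (indices 0-3) where sample 3 is a singleton:
#   G_df = cudf.DataFrame({'source': [0, 1], 'destination': [1, 2]})
#   G = add_self_loop(G_df, 3, weights = False, renumber = False)
#   # G now includes vertex 3 via the added (3, 3) self-loop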
def printClusters(G, rlist, outPrefix=None, oldClusterFile=None,
externalClusterCSV=None, printRef=True, printCSV=True,
clustering_type='combined', write_unwords=True,
use_gpu = False):
"""Get cluster assignments
Also writes assignments to a CSV file
Args:
G (graph)
Network used to define clusters
rlist (list)
Ordered list of sequence names corresponding to the network vertices
outPrefix (str)
Prefix for output CSV
Default = None
oldClusterFile (str)
CSV with previous cluster assignments.
Pass to ensure consistency in cluster assignment name.
Default = None
externalClusterCSV (str)
CSV with cluster assignments from any source. Will print a file
relating these to new cluster assignments
Default = None
printRef (bool)
If false, print only query sequences in the output
Default = True
printCSV (bool)
Print results to file
Default = True
clustering_type (str)
Type of clustering network, used for comparison with old clusters
Default = 'combined'
write_unwords (bool)
Write clusters with a pronounceable name rather than a numerical index
Default = True
use_gpu (bool)
Whether to use cugraph for network analysis
Returns:
clustering (dict)
Dictionary of cluster assignments (keys are sequence names)
"""
if oldClusterFile == None and printRef == False:
raise RuntimeError("Trying to print query clusters with no query sequences")
if write_unwords and not printCSV:
write_unwords = False
# get a sorted list of component assignments
if use_gpu:
use_gpu = check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = True)
component_assignments = cugraph.components.connectivity.connected_components(G)
component_frequencies = component_assignments['labels'].value_counts(sort = True, ascending = False)
newClusters = [set() for rank in range(component_frequencies.size)]
for isolate_index, isolate_name in enumerate(rlist): # assume sorted at the moment
component = component_assignments['labels'].iloc[isolate_index].item()
component_rank_bool = component_frequencies.index == component
component_rank = np.argmax(component_rank_bool.to_array())
newClusters[component_rank].add(isolate_name)
else:
component_assignments, component_frequencies = gt.label_components(G)
component_frequency_ranks = len(component_frequencies) - rankdata(component_frequencies, method = 'ordinal').astype(int)
# use components to determine new clusters
newClusters = [set() for rank in range(len(component_frequency_ranks))]
for isolate_index, isolate_name in enumerate(rlist):
component = component_assignments.a[isolate_index]
component_rank = component_frequency_ranks[component]
newClusters[component_rank].add(isolate_name)
oldNames = set()
if oldClusterFile != None:
oldAllClusters = readIsolateTypeFromCsv(oldClusterFile, mode = 'external', return_dict = False)
oldClusters = oldAllClusters[list(oldAllClusters.keys())[0]]
new_id = len(oldClusters.keys()) + 1 # 1-indexed
while new_id in oldClusters:
new_id += 1 # in case clusters have been merged
# Samples in previous clustering
for prev_cluster in oldClusters.values():
for prev_sample in prev_cluster:
oldNames.add(prev_sample)
# Assign each cluster a name
clustering = {}
foundOldClusters = []
cluster_unword = {}
if write_unwords:
unword_generator = gen_unword()
for newClsIdx, newCluster in enumerate(newClusters):
needs_unword = False
# Ensure consistency with previous labelling
if oldClusterFile != None:
merge = False
cls_id = None
# Samples in this cluster that are not queries
ref_only = oldNames.intersection(newCluster)
# A cluster with no previous observations
if len(ref_only) == 0:
cls_id = str(new_id) # harmonise data types; string flexibility helpful
new_id += 1
needs_unword = True
else:
# Search through old cluster IDs to find a match
for oldClusterName, oldClusterMembers in oldClusters.items():
join = ref_only.intersection(oldClusterMembers)
if len(join) > 0:
# Check cluster is consistent with previous definitions
if oldClusterName in foundOldClusters:
sys.stderr.write("WARNING: Old cluster " + oldClusterName + " split"
" across multiple new clusters\n")
else:
foundOldClusters.append(oldClusterName)
# Query has merged clusters
if len(join) < len(ref_only):
merge = True
needs_unword = True
if cls_id == None:
cls_id = oldClusterName
else:
cls_id += "_" + oldClusterName
# Exact match -> same name as before
elif len(join) == len(ref_only):
assert merge == False # should not have already been part of a merge
cls_id = oldClusterName
break
# Report merges
if merge:
merged_ids = cls_id.split("_")
sys.stderr.write("Clusters " + ",".join(merged_ids) + " have merged into " + cls_id + "\n")
# Otherwise just number sequentially starting from 1
else:
cls_id = newClsIdx + 1
needs_unword = True
if write_unwords and needs_unword:
unword = next(unword_generator)
else:
unword = None
for cluster_member in newCluster:
clustering[cluster_member] = cls_id
if unword is not None:
cluster_unword[cluster_member] = unword
# print clustering to file
if printCSV:
outFileName = outPrefix + "_clusters.csv"
with open(outFileName, 'w') as cluster_file:
cluster_file.write("Taxon,Cluster\n")
if write_unwords:
unword_file = open(outPrefix + "_unword_clusters.csv", 'w')
unword_file.write("Taxon,Cluster_name\n")
# sort the clusters by frequency - define a list with a custom sort order
# first line gives tuples e.g. (1, 28), (2, 17) - cluster 1 has 28 members, cluster 2 has 17 members
# second line takes first element - the cluster IDs sorted by frequency
freq_order = sorted(dict(Counter(clustering.values())).items(), key=operator.itemgetter(1), reverse=True)
freq_order = [x[0] for x in freq_order]
# iterate through cluster dictionary sorting by value using above custom sort order
for cluster_member, cluster_name in sorted(clustering.items(), key=lambda i:freq_order.index(i[1])):
if printRef or cluster_member not in oldNames:
cluster_file.write(",".join((cluster_member, str(cluster_name))) + "\n")
if write_unwords and cluster_member in cluster_unword:
unword_file.write(",".join((cluster_member, cluster_unword[cluster_member])) + "\n")
if write_unwords:
unword_file.close()
if externalClusterCSV is not None:
printExternalClusters(newClusters, externalClusterCSV, outPrefix, oldNames, printRef)
return(clustering)
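# Illustrative usage (editorial addition, not part of the original module), assuming `G`
# and `rlist` follow the same database order and "output/example" is a hypothetical prefix:
#   clusters = printClusters(G, rlist, outPrefix = "output/example", printCSV = True)
#   # clusters maps each sample name to its cluster ID; output/example_clusters.csv is written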
def printExternalClusters(newClusters, extClusterFile, outPrefix,
oldNames, printRef = True):
"""Prints cluster assignments with respect to previously defined
clusters or labels.
Args:
newClusters (set iterable)
The components from the graph G, defining the PopPUNK clusters
extClusterFile (str)
A CSV file containing definitions of the external clusters for
each sample (does not need to contain all samples)
outPrefix (str)
Prefix for output CSV (_external_clusters.csv)
oldNames (list)
A list of the reference sequences
printRef (bool)
If false, print only query sequences in the output
Default = True
"""
# Object to store output csv datatable
d = defaultdict(list)
# Read in external clusters
extClusters = \
readIsolateTypeFromCsv(extClusterFile,
mode = 'external',
return_dict = True)
# Go through each cluster (as defined by poppunk) and find the external
# clusters that had previously been assigned to any sample in the cluster
for ppCluster in newClusters:
# Store clusters as a set to avoid duplicates
prevClusters = defaultdict(set)
for sample in ppCluster:
for extCluster in extClusters:
if sample in extClusters[extCluster]:
prevClusters[extCluster].add(extClusters[extCluster][sample])
# Go back through and print the samples that were found
for sample in ppCluster:
if printRef or sample not in oldNames:
d['sample'].append(sample)
for extCluster in extClusters:
if extCluster in prevClusters:
d[extCluster].append(";".join(prevClusters[extCluster]))
else:
d[extCluster].append("NA")
if "sample" not in d:
sys.stderr.write("WARNING: No new samples found, cannot write external clusters\n")
else:
pd.DataFrame(data=d).to_csv(outPrefix + "_external_clusters.csv",
columns = ["sample"] + list(extClusters.keys()),
index = False)
def generate_minimum_spanning_tree(G, from_cugraph = False):
"""Generate a minimum spanning tree from a network
Args:
G (network)
Graph tool network
from_cugraph (bool)
If a pre-calculated MST from cugraph
[default = False]
Returns:
mst_network (str)
Minimum spanning tree (as graph-tool graph)
"""
#
# Create MST
#
if from_cugraph:
mst_network = G
else:
sys.stderr.write("Starting calculation of minimum-spanning tree\n")
# Test if weighted network and calculate minimum spanning tree
if "weight" in G.edge_properties:
mst_edge_prop_map = gt.min_spanning_tree(G, weights = G.ep["weight"])
mst_network = gt.GraphView(G, efilt = mst_edge_prop_map)
mst_network = gt.Graph(mst_network, prune = True)
else:
sys.stderr.write("generate_minimum_spanning_tree requires a weighted graph\n")
raise RuntimeError("MST passed unweighted graph")
# Find seed nodes as those with greatest outdegree in each component
num_components = 1
seed_vertices = set()
if from_cugraph:
mst_df = cugraph.components.connectivity.connected_components(mst_network)
num_components_idx = mst_df['labels'].max()
num_components = mst_df.iloc[num_components_idx]['labels']
if num_components > 1:
mst_df['degree'] = mst_network.in_degree()['degree']
# idxmax only returns first occurrence of maximum so should maintain
# MST - check cuDF implementation is the same
max_indices = mst_df.groupby(['labels'])['degree'].idxmax()
seed_vertices = mst_df.iloc[max_indices]['vertex']
else:
component_assignments, component_frequencies = gt.label_components(mst_network)
num_components = len(component_frequencies)
if num_components > 1:
for component_index in range(len(component_frequencies)):
component_members = component_assignments.a == component_index
component = gt.GraphView(mst_network, vfilt = component_members)
component_vertices = component.get_vertices()
out_degrees = component.get_out_degrees(component_vertices)
seed_vertex = list(component_vertices[np.where(out_degrees == np.amax(out_degrees))])
seed_vertices.add(seed_vertex[0]) # Can only add one otherwise not MST
# If multiple components, add distances between seed nodes
if num_components > 1:
# Extract edges and maximum edge length - as DF for cugraph
# list of tuples for graph-tool
if from_cugraph:
# With cugraph the MST is already calculated
# so no extra edges can be retrieved from the graph
G_df = G.view_edge_list()
max_weight = G_df['weights'].max()
first_seed = seed_vertices.iloc[0]
G_seed_link_df = cudf.DataFrame()
G_seed_link_df['dst'] = seed_vertices.iloc[1:seed_vertices.size]
G_seed_link_df['src'] = first_seed
G_seed_link_df['weights'] = max_weight
G_df = G_df.append(G_seed_link_df)
else:
# With graph-tool look to retrieve edges in larger graph
connections = []
max_weight = float(np.max(G.edge_properties["weight"].a))
# Identify edges between seeds to link components together
for ref in seed_vertices:
seed_edges = G.get_all_edges(ref, [G.ep['weight']])
found = False # Not all edges may be in graph
for seed_edge in seed_edges:
if seed_edge[1] in seed_vertices:
found = True
connections.append((seed_edge))
# TODO: alternative would be to requery the DB (likely quick)
if found == False:
for query in seed_vertices:
if query != ref:
connections.append((ref, query, max_weight))
# Construct graph
if from_cugraph:
mst_network = cugraph.Graph()
G_df.rename(columns={'src': 'source','dst': 'destination'}, inplace=True)
mst_network.from_cudf_edgelist(G_df, edge_attr='weights', renumber=False)
else:
seed_G = gt.Graph(directed = False)
seed_G.add_vertex(len(seed_vertices))
eweight = seed_G.new_ep("float")
seed_G.add_edge_list(connections, eprops = [eweight])
seed_G.edge_properties["weight"] = eweight
seed_mst_edge_prop_map = gt.min_spanning_tree(seed_G, weights = seed_G.ep["weight"])
seed_mst_network = gt.GraphView(seed_G, efilt = seed_mst_edge_prop_map)
# Insert seed MST into original MST - may be possible to use graph_union with include=True & intersection
deep_edges = seed_mst_network.get_edges([seed_mst_network.ep["weight"]])
mst_network.add_edge_list(deep_edges)
sys.stderr.write("Completed calculation of minimum-spanning tree\n")
return mst_network
def get_vertex_list(G, use_gpu = False):
"""Generate a list of node indices
Args:
G (network)
Graph tool network
use_gpu (bool)
Whether graph is a cugraph or not
[default = False]
Returns:
vlist (list)
List of integers corresponding to nodes
"""
if use_gpu:
vlist = range(G.number_of_vertices())
else:
vlist = list(G.vertices())
return vlist
def save_network(G, prefix = None, suffix = None, use_graphml = False,
use_gpu = False):
"""Save a network to disk
Args:
G (network)
Graph tool network
prefix (str)
Prefix for output file
suffix (str)
Optional suffix appended to the output file name
use_graphml (bool)
Whether to output a graph-tool file
in graphml format
use_gpu (bool)
Whether graph is a cugraph or not
[default = False]
"""
file_name = prefix + "/" + os.path.basename(prefix)
if suffix is not None:
file_name = file_name + suffix
if use_gpu:
G.to_pandas_edgelist().to_csv(file_name + '.csv.gz',
compression='gzip', index = False)
else:
if use_graphml:
G.save(file_name + '.graphml',
fmt = 'graphml')
else:
G.save(file_name + '.gt',
fmt = 'gt')
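# Illustrative usage (editorial addition, not part of the original module), assuming a
# graph-tool network and a hypothetical output prefix:
#   save_network(G, prefix = "output/example", suffix = "_graph", use_graphml = False)
#   # -> output/example/example_graph.gt (or a .csv.gz edge list when use_gpu = True)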
def cugraph_to_graph_tool(G, rlist):
"""Save a network to disk
Args:
G (cugraph network)
Cugraph network
rlist (list)
List of sequence names
Returns:
G (graph-tool network)
Graph tool network
"""
edge_df = G.view_edge_list()
edge_tuple = edge_df[['src', 'dst']].values.tolist()
edge_weights = None
if 'weights' in edge_df.columns:
edge_weights = edge_df['weights'].values_host
G = construct_network_from_edge_list(rlist, rlist,
edge_tuple,
weights = edge_weights,
summarise=False)
vid = G.new_vertex_property('string',
vals = rlist)
G.vp.id = vid
return G
def sparse_mat_to_network(sparse_mat, rlist, use_gpu = False):
"""Generate a network from a lineage rank fit
Args:
sparse_mat (scipy or cupyx sparse matrix)
Sparse matrix of kNN from lineage fit
rlist (list)
List of sequence names
use_gpu (bool)
Whether GPU libraries should be used
Returns:
G (network)
Graph tool or cugraph network
"""
if use_gpu:
G_df = cudf.DataFrame(columns = ['source','destination','weights'])
G_df['source'] = sparse_mat.row
G_df['destination'] = sparse_mat.col
G_df['weights'] = sparse_mat.data
max_in_vertex_labels = len(rlist)-1
G = add_self_loop(G_df, max_in_vertex_labels, weights = True, renumber = False)
else:
connections = []
for (src, dst) in zip(sparse_mat.row, sparse_mat.col):
connections.append((src, dst))
G = construct_network_from_edge_list(rlist,
rlist,
connections,
weights=sparse_mat.data,
summarise=False)
return G
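# Illustrative usage (editorial addition, not part of the original module), assuming a
# scipy COO matrix of kNN distances from a lineage fit:
#   from scipy.sparse import coo_matrix
#   sparse_mat = coo_matrix(([0.01, 0.02], ([0, 1], [1, 2])), shape = (3, 3))
#   G = sparse_mat_to_network(sparse_mat, ['s1', 's2', 's3'], use_gpu = False)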
```
#### File: PopPUNK/PopPUNK/sketchlib.py
```python
import os
import sys
import subprocess
# additional
import collections
import pickle
import time
from tempfile import mkstemp
from multiprocessing import Pool, Lock
from functools import partial
from itertools import product
from glob import glob
from random import sample
import numpy as np
from scipy import optimize
import pp_sketchlib
import h5py
from .__init__ import SKETCHLIB_MAJOR, SKETCHLIB_MINOR, SKETCHLIB_PATCH
from .utils import iterDistRows
from .utils import readRfile
from .plot import plot_fit
sketchlib_exe = "poppunk_sketch"
def checkSketchlibVersion():
"""Checks that sketchlib can be run, and returns version
Returns:
version (str)
Version string
"""
try:
version = pp_sketchlib.version
# Older versions didn't export attributes
except AttributeError:
p = subprocess.Popen([sketchlib_exe + ' --version'], shell=True, stdout=subprocess.PIPE)
version = 0
for line in iter(p.stdout.readline, ''):
if line != '':
version = line.rstrip().decode().split(" ")[1]
break
sketchlib_version = [int(v) for v in version.split(".")]
if sketchlib_version[0] < SKETCHLIB_MAJOR or \
sketchlib_version[0] == SKETCHLIB_MAJOR and sketchlib_version[1] < SKETCHLIB_MINOR or \
sketchlib_version[0] == SKETCHLIB_MAJOR and sketchlib_version[1] == SKETCHLIB_MINOR and sketchlib_version[2] < SKETCHLIB_PATCH:
sys.stderr.write("This version of PopPUNK requires sketchlib "
"v" + str(SKETCHLIB_MAJOR) + \
"." + str(SKETCHLIB_MINOR) + \
"." + str(SKETCHLIB_PATCH) + " or higher\n")
sys.stderr.write("Continuing... but safety not guaranteed\n")
return version
def checkSketchlibLibrary():
"""Gets the location of the sketchlib library
Returns:
lib (str)
Location of sketchlib .so/.dyld
"""
sketchlib_loc = pp_sketchlib.__file__
return(sketchlib_loc)
def createDatabaseDir(outPrefix, kmers):
"""Creates the directory to write sketches to, removing old files if unnecessary
Args:
outPrefix (str)
output db prefix
kmers (list)
k-mer sizes in db
"""
# check for writing
if os.path.isdir(outPrefix):
# remove old database files if not needed
db_file = outPrefix + "/" + os.path.basename(outPrefix) + ".h5"
if os.path.isfile(db_file):
ref_db = h5py.File(db_file, 'r')
for sample_name in list(ref_db['sketches'].keys()):
knum = ref_db['sketches/' + sample_name].attrs['kmers']
remove_prev_db = False
for kmer_length in knum:
if not (kmer_length in kmers):
sys.stderr.write("Previously-calculated k-mer size " + str(kmer_length) +
" not in requested range (" + str(kmers) + ")\n")
remove_prev_db = True
break
if remove_prev_db:
sys.stderr.write("Removing old database " + db_file + "\n")
os.remove(db_file)
break
else:
try:
os.makedirs(outPrefix)
except OSError:
sys.stderr.write("Cannot create output directory\n")
sys.exit(1)
def getSketchSize(dbPrefix):
"""Determine sketch size, and ensures consistent in whole database
``sys.exit(1)`` is called if DBs have different sketch sizes
Args:
dbprefix (str)
Prefix for mash databases
Returns:
sketchSize (int)
sketch size (64x C++ definition)
codonPhased (bool)
whether the DB used codon phased seeds
"""
db_file = dbPrefix + "/" + os.path.basename(dbPrefix) + ".h5"
ref_db = h5py.File(db_file, 'r')
try:
codon_phased = ref_db['sketches'].attrs['codon_phased']
except KeyError:
codon_phased = False
prev_sketch = 0
for sample_name in list(ref_db['sketches'].keys()):
sketch_size = ref_db['sketches/' + sample_name].attrs['sketchsize64']
if prev_sketch == 0:
prev_sketch = sketch_size
elif sketch_size != prev_sketch:
sys.stderr.write("Problem with database; sketch sizes for sample " +
sample_name + " is " + str(prev_sketch) +
", but smaller kmers have sketch sizes of " + str(sketch_size) + "\n")
sys.exit(1)
return int(sketch_size), codon_phased
def getKmersFromReferenceDatabase(dbPrefix):
"""Get kmers lengths from existing database
Args:
dbPrefix (str)
Prefix for sketch DB files
Returns:
kmers (list)
List of k-mer lengths used in database
"""
db_file = dbPrefix + "/" + os.path.basename(dbPrefix) + ".h5"
ref_db = h5py.File(db_file, 'r')
prev_kmer_sizes = []
for sample_name in list(ref_db['sketches'].keys()):
kmer_size = ref_db['sketches/' + sample_name].attrs['kmers']
if len(prev_kmer_sizes) == 0:
prev_kmer_sizes = kmer_size
elif np.any(kmer_size != prev_kmer_sizes):
sys.stderr.write("Problem with database; kmer lengths inconsistent: " +
str(kmer_size) + " vs " + str(prev_kmer_sizes) + "\n")
sys.exit(1)
prev_kmer_sizes.sort()
kmers = np.asarray(prev_kmer_sizes)
return kmers
def readDBParams(dbPrefix):
"""Get kmers lengths and sketch sizes from existing database
Calls :func:`~getKmersFromReferenceDatabase` and :func:`~getSketchSize`
Uses passed values if db missing
Args:
dbPrefix (str)
Prefix for sketch DB files
Returns:
kmers (list)
List of k-mer lengths used in database
sketch_sizes (list)
List of sketch sizes used in database
codonPhased (bool)
whether the DB used codon phased seeds
"""
db_kmers = getKmersFromReferenceDatabase(dbPrefix)
if len(db_kmers) == 0:
sys.stderr.write("Couldn't find sketches in " + dbPrefix + "\n")
sys.exit(1)
else:
sketch_sizes, codon_phased = getSketchSize(dbPrefix)
return db_kmers, sketch_sizes, codon_phased
def getSeqsInDb(dbname):
"""Return an array with the sequences in the passed database
Args:
dbname (str)
Sketches database filename
Returns:
seqs (list)
List of sequence names in sketch DB
"""
seqs = []
ref = h5py.File(dbname, 'r')
for sample_name in list(ref['sketches'].keys()):
seqs.append(sample_name)
return seqs
def joinDBs(db1, db2, output, update_random = None):
"""Join two sketch databases with the low-level HDF5 copy interface
Args:
db1 (str)
Prefix for db1
db2 (str)
Prefix for db2
output (str)
Prefix for joined output
update_random (dict)
Whether to re-calculate the random object. May contain
control arguments strand_preserved and threads (see :func:`addRandom`)
"""
join_prefix = output + "/" + os.path.basename(output)
db1_name = db1 + "/" + os.path.basename(db1) + ".h5"
db2_name = db2 + "/" + os.path.basename(db2) + ".h5"
hdf1 = h5py.File(db1_name, 'r')
hdf2 = h5py.File(db2_name, 'r')
hdf_join = h5py.File(join_prefix + ".tmp.h5", 'w') # add .tmp in case join_name exists
# Can only copy into new group, so for second file these are appended one at a time
try:
hdf1.copy('sketches', hdf_join)
join_grp = hdf_join['sketches']
read_grp = hdf2['sketches']
for dataset in read_grp:
join_grp.copy(read_grp[dataset], dataset)
# Copy or update random matches
if update_random is not None:
threads = 1
strand_preserved = False
if isinstance(update_random, dict):
if "threads" in update_random:
threads = update_random["threads"]
if "strand_preserved" in update_random:
strand_preserved = update_random["strand_preserved"]
sequence_names = list(hdf_join['sketches'].keys())
kmer_size = hdf_join['sketches/' + sequence_names[0]].attrs['kmers']
# Need to close before adding random
hdf_join.close()
if len(sequence_names) > 2:
sys.stderr.write("Updating random match chances\n")
pp_sketchlib.addRandom(join_prefix + ".tmp",
sequence_names,
kmer_size,
not strand_preserved,
threads)
elif 'random' in hdf1:
hdf1.copy('random', hdf_join)
# Clean up
hdf1.close()
hdf2.close()
if update_random is None:
hdf_join.close()
except RuntimeError as e:
sys.stderr.write("ERROR: " + str(e) + "\n")
sys.stderr.write("Joining sketches failed, try running without --update-db\n")
sys.exit(1)
# Rename results to correct location
os.rename(join_prefix + ".tmp.h5", join_prefix + ".h5")
def removeFromDB(db_name, out_name, removeSeqs, full_names = False):
"""Remove sketches from the DB the low-level HDF5 copy interface
Args:
db_name (str)
Prefix for hdf database
out_name (str)
Prefix for output (pruned) database
removeSeqs (list)
Names of sequences to remove from database
full_names (bool)
If True, db_name and out_name are the full paths to h5 files
"""
removeSeqs = set(removeSeqs)
if not full_names:
db_file = db_name + "/" + os.path.basename(db_name) + ".h5"
out_file = out_name + "/" + os.path.basename(out_name) + ".tmp.h5"
else:
db_file = db_name
out_file = out_name
hdf_in = h5py.File(db_file, 'r')
hdf_out = h5py.File(out_file, 'w')
try:
if 'random' in hdf_in.keys():
hdf_in.copy('random', hdf_out)
out_grp = hdf_out.create_group('sketches')
read_grp = hdf_in['sketches']
for attr_name, attr_val in read_grp.attrs.items():
out_grp.attrs.create(attr_name, attr_val)
removed = []
for dataset in read_grp:
if dataset not in removeSeqs:
out_grp.copy(read_grp[dataset], dataset)
else:
removed.append(dataset)
except RuntimeError as e:
sys.stderr.write("ERROR: " + str(e) + "\n")
sys.stderr.write("Error while deleting sequence " + dataset + "\n")
sys.exit(1)
missed = removeSeqs.difference(set(removed))
if len(missed) > 0:
sys.stderr.write("WARNING: Did not find samples to remove:\n")
sys.stderr.write("\t".join(missed) + "\n")
# Clean up
hdf_in.close()
hdf_out.close()
def constructDatabase(assemblyList, klist, sketch_size, oPrefix,
threads, overwrite,
strand_preserved, min_count,
use_exact, qc_dict, calc_random = True,
codon_phased = False,
use_gpu = False, deviceid = 0):
"""Sketch the input assemblies at the requested k-mer lengths
A multithread wrapper around :func:`~runSketch`. Threads are used to either run multiple sketch
processes for each klist value, or increase the threads used by each ``mash sketch`` process
if len(klist) > threads.
Also calculates random match probability based on length of first genome
in assemblyList.
Args:
assemblyList (str)
File with locations of assembly files to be sketched
klist (list)
List of k-mer sizes to sketch
sketch_size (int)
Size of sketch (``-s`` option)
oPrefix (str)
Output prefix for resulting sketch files
threads (int)
Number of threads to use (default = 1)
overwrite (bool)
Whether to overwrite sketch DBs, if they already exist.
(default = False)
strand_preserved (bool)
Ignore reverse complement k-mers (default = False)
min_count (int)
Minimum count of k-mer in reads to include
(default = 0)
use_exact (bool)
Use exact count of k-mer appearance in reads
(default = False)
qc_dict (dict)
Dict containing QC settings
calc_random (bool)
Add random match chances to DB (turn off for queries)
codon_phased (bool)
Use codon phased seeds
(default = False)
use_gpu (bool)
Use GPU for read sketching
(default = False)
deviceid (int)
GPU device id
(default = 0)
Returns:
names (list)
List of names included in the database (some may be pruned due
to QC)
"""
# read file names
names, sequences = readRfile(assemblyList)
# create directory
dbname = oPrefix + "/" + os.path.basename(oPrefix)
dbfilename = dbname + ".h5"
if os.path.isfile(dbfilename) and overwrite == True:
sys.stderr.write("Overwriting db: " + dbfilename + "\n")
os.remove(dbfilename)
# generate sketches
pp_sketchlib.constructDatabase(dbname,
names,
sequences,
klist,
sketch_size,
codon_phased,
False,
not strand_preserved,
min_count,
use_exact,
threads,
use_gpu,
deviceid)
# QC sequences
if qc_dict['run_qc']:
filtered_names = sketchlibAssemblyQC(oPrefix,
names,
klist,
qc_dict,
strand_preserved,
threads)
else:
filtered_names = names
# Add random matches if required
# (typically on for reference, off for query)
if (calc_random):
addRandom(oPrefix,
filtered_names,
klist,
strand_preserved,
overwrite = True,
threads = threads)
# return filtered file names
return filtered_names
def addRandom(oPrefix, sequence_names, klist,
strand_preserved = False, overwrite = False, threads = 1):
"""Add chance of random match to a HDF5 sketch DB
Args:
oPrefix (str)
Sketch database prefix
sequence_names (list)
Names of sequences to include in calculation
klist (list)
List of k-mer sizes to sketch
strand_preserved (bool)
Set true to ignore rc k-mers
overwrite (bool)
Set true to overwrite existing random match chances
threads (int)
Number of threads to use (default = 1)
"""
if len(sequence_names) <= 2:
sys.stderr.write("Cannot add random match chances with this few genomes\n")
else:
dbname = oPrefix + "/" + os.path.basename(oPrefix)
hdf_in = h5py.File(dbname + ".h5", 'r+')
if 'random' in hdf_in:
if overwrite:
del hdf_in['random']
else:
sys.stderr.write("Using existing random match chances in DB\n")
return
hdf_in.close()
pp_sketchlib.addRandom(dbname,
sequence_names,
klist,
not strand_preserved,
threads)
def queryDatabase(rNames, qNames, dbPrefix, queryPrefix, klist, self = True, number_plot_fits = 0,
threads = 1, use_gpu = False, deviceid = 0):
"""Calculate core and accessory distances between query sequences and a sketched database
For a reference database, runs the query against itself to find all pairwise
core and accessory distances.
Uses the relation :math:`pr(a, b) = (1-a)(1-c)^k`
To get the ref and query name for each row of the returned distances, call to the iterator
:func:`~PopPUNK.utils.iterDistRows` with the returned refList and queryList
Args:
rNames (list)
Names of references to query
qNames (list)
Names of queries
dbPrefix (str)
Prefix for reference mash sketch database created by :func:`~constructDatabase`
queryPrefix (str)
Prefix for query mash sketch database created by :func:`~constructDatabase`
klist (list)
K-mer sizes to use in the calculation
self (bool)
Set true if query = ref
(default = True)
number_plot_fits (int)
If > 0, the number of k-mer length fits to plot (saved as pdfs).
Takes random pairs of comparisons and calls :func:`~PopPUNK.plot.plot_fit`
(default = 0)
threads (int)
Number of threads to use in the mash process
(default = 1)
use_gpu (bool)
Use a GPU for querying
(default = False)
deviceid (int)
Index of the CUDA GPU device to use
(default = 0)
Returns:
distMat (numpy.array)
Core distances (column 0) and accessory distances (column 1) between
refList and queryList
"""
ref_db = dbPrefix + "/" + os.path.basename(dbPrefix)
if self:
if dbPrefix != queryPrefix:
raise RuntimeError("Must use same db for self query")
qNames = rNames
# Calls to library
distMat = pp_sketchlib.queryDatabase(ref_db, ref_db, rNames, rNames, klist,
True, False, threads, use_gpu, deviceid)
# option to plot core/accessory fits. Choose a random number from cmd line option
if number_plot_fits > 0:
jacobian = -np.hstack((np.ones((klist.shape[0], 1)), klist.reshape(-1, 1)))
for plot_idx in range(number_plot_fits):
example = sample(rNames, k=2)
raw = np.zeros(len(klist))
corrected = np.zeros(len(klist))
raw = pp_sketchlib.queryDatabase(ref_db,
ref_db,
[example[0]],
[example[1]],
klist,
random_correct = False,
jaccard = True,
num_threads = threads,
use_gpu = False)
corrected = pp_sketchlib.queryDatabase(ref_db,
ref_db,
[example[0]],
[example[1]],
klist,
random_correct = True,
jaccard = True,
num_threads = threads,
use_gpu = False)
raw_fit = fitKmerCurve(raw[0], klist, jacobian)
corrected_fit = fitKmerCurve(corrected[0], klist, jacobian)
plot_fit(klist,
raw[0],
raw_fit,
corrected[0],
corrected_fit,
dbPrefix + "/" + dbPrefix + "_fit_example_" + str(plot_idx + 1),
"Example fit " + str(plot_idx + 1) + " - " + example[0] + " vs. " + example[1])
else:
duplicated = set(rNames).intersection(set(qNames))
if len(duplicated) > 0:
sys.stderr.write("Sample names in query are contained in reference database:\n")
sys.stderr.write("\n".join(duplicated))
sys.stderr.write("Unique names are required!\n")
sys.exit(1)
# Calls to library
query_db = queryPrefix + "/" + os.path.basename(queryPrefix)
distMat = pp_sketchlib.queryDatabase(ref_db, query_db, rNames, qNames, klist,
True, False, threads, use_gpu, deviceid)
# option to plot core/accessory fits. Choose a random number from cmd line option
if number_plot_fits > 0:
jacobian = -np.hstack((np.ones((klist.shape[0], 1)), klist.reshape(-1, 1)))
ref_examples = sample(rNames, k = number_plot_fits)
query_examples = sample(qNames, k = number_plot_fits)
raw = pp_sketchlib.queryDatabase(ref_db,
query_db,
ref_examples,
query_examples,
klist,
random_correct = False,
jaccard = True,
num_threads = threads,
use_gpu = False)
corrected = pp_sketchlib.queryDatabase(ref_db,
query_db,
ref_examples,
query_examples,
klist,
random_correct = True,
jaccard = True,
num_threads = threads,
use_gpu = False)
for plot_idx in range(number_plot_fits):
raw_fit = fitKmerCurve(raw[plot_idx], klist, jacobian)
corrected_fit = fitKmerCurve(corrected[plot_idx], klist, jacobian)
plot_fit(klist,
raw[plot_idx],
raw_fit,
corrected[plot_idx],
corrected_fit,
queryPrefix + "/" + queryPrefix + "_fit_example_" + str(plot_idx + 1),
"Example fit " + str(plot_idx + 1) + " - " + ref_examples[plot_idx] + \
" vs. " + query_examples[plot_idx])
return distMat
def pickTypeIsolate(prefix, names):
"""Selects a type isolate as that with a minimal proportion
of missing data.
Args:
prefix (str)
Prefix of output files
names (list)
Names of samples to QC
Returns:
type_isolate (str)
Name of isolate selected as reference
"""
# open databases
db_name = prefix + '/' + os.path.basename(prefix) + '.h5'
hdf_in = h5py.File(db_name, 'r')
min_prop_n = 1.0
type_isolate = None
try:
# process data structures
read_grp = hdf_in['sketches']
# iterate through sketches
for dataset in read_grp:
if hdf_in['sketches'][dataset].attrs['missing_bases']/hdf_in['sketches'][dataset].attrs['length'] < min_prop_n:
min_prop_n = hdf_in['sketches'][dataset].attrs['missing_bases']/hdf_in['sketches'][dataset].attrs['length']
type_isolate = dataset
if min_prop_n == 0.0:
break
# if failure still close files to avoid corruption
except:
hdf_in.close()
sys.stderr.write('Problem processing h5 databases during QC - aborting\n')
print("Unexpected error:", sys.exc_info()[0], file = sys.stderr)
raise
return type_isolate
def sketchlibAssemblyQC(prefix, names, klist, qc_dict, strand_preserved, threads):
"""Calculates random match probability based on means of genomes
in assemblyList, and looks for length outliers.
Args:
prefix (str)
Prefix of output files
names (list)
Names of samples to QC
klist (list)
List of k-mer sizes to sketch
qc_dict (dict)
Dictionary of QC parameters
strand_preserved (bool)
Ignore reverse complement k-mers (default = False)
threads (int)
Number of threads to use in parallelisation
Returns:
retained (list)
List of sequences passing QC filters
"""
sys.stderr.write("Running QC on sketches\n")
# open databases
db_name = prefix + '/' + os.path.basename(prefix) + '.h5'
hdf_in = h5py.File(db_name, 'r')
# try/except structure to prevent h5 corruption
failed_samples = False
try:
# process data structures
read_grp = hdf_in['sketches']
seq_length = {}
seq_ambiguous = {}
retained = []
failed = []
# iterate through sketches
for dataset in read_grp:
if dataset in names:
# test thresholds
remove = False
seq_length[dataset] = hdf_in['sketches'][dataset].attrs['length']
seq_ambiguous[dataset] = hdf_in['sketches'][dataset].attrs['missing_bases']
# calculate thresholds
# get mean length
genome_lengths = np.fromiter(seq_length.values(), dtype = int)
mean_genome_length = np.mean(genome_lengths)
# calculate length threshold unless user-supplied
if qc_dict['length_range'][0] is None:
lower_length = mean_genome_length - \
qc_dict['length_sigma'] * np.std(genome_lengths)
upper_length = mean_genome_length + \
qc_dict['length_sigma'] * np.std(genome_lengths)
else:
lower_length, upper_length = qc_dict['length_range']
# open file to report QC failures
with open(prefix + '/' + os.path.basename(prefix) + '_qcreport.txt', 'a+') as qc_file:
# iterate through and filter
for dataset in seq_length.keys():
# determine if sequence passes filters
remove = False
if seq_length[dataset] < lower_length:
remove = True
qc_file.write(dataset + '\tBelow lower length threshold\n')
elif seq_length[dataset] > upper_length:
remove = True
qc_file.write(dataset + '\tAbove upper length threshold\n')
if qc_dict['upper_n'] is not None and seq_ambiguous[dataset] > qc_dict['upper_n']:
remove = True
qc_file.write(dataset + '\tAmbiguous sequence too high\n')
elif seq_ambiguous[dataset] > qc_dict['prop_n'] * seq_length[dataset]:
remove = True
qc_file.write(dataset + '\tAmbiguous sequence too high\n')
if remove:
sys.stderr.write(dataset + ' failed QC\n')
failed_samples = True
failed.append(dataset)
else:
retained.append(dataset)
# retain sketches of failed samples
if qc_dict['retain_failures']:
removeFromDB(db_name,
prefix + '/' + 'failed.' + os.path.basename(prefix) + '.h5',
retained,
full_names = True)
# new database file if pruning
if qc_dict['qc_filter'] == 'prune':
filtered_db_name = prefix + '/' + 'filtered.' + os.path.basename(prefix) + '.h5'
removeFromDB(db_name,
prefix + '/' + 'filtered.' + os.path.basename(prefix) + '.h5',
failed,
full_names = True)
os.rename(filtered_db_name, db_name)
hdf_in.close()
# if failure still close files to avoid corruption
except:
hdf_in.close()
sys.stderr.write('Problem processing h5 databases during QC - aborting\n')
print("Unexpected error:", sys.exc_info()[0], file = sys.stderr)
raise
# stop if at least one sample fails QC and option is not continue/prune
if failed_samples and qc_dict['qc_filter'] == 'stop':
sys.stderr.write('Sequences failed QC filters - details in ' + \
prefix + '/' + os.path.basename(prefix) + \
'_qcreport.txt\n')
sys.exit(1)
elif qc_dict['qc_filter'] == 'continue':
retained = retained + failed
# stop if no sequences pass QC
if len(retained) == 0:
sys.stderr.write('No sequences passed QC filters - please adjust your settings\n')
sys.exit(1)
# remove random matches if already present
if 'random' in hdf_in:
hdf_in.close()
hdf_in = h5py.File(db_name, 'r+')
del hdf_in['random']
hdf_in.close()
# This gives back retained in the same order as names
retained = [x for x in names if x in frozenset(retained)]
# stop if type sequence does not pass QC or is absent
if qc_dict['type_isolate'] is not None and qc_dict['type_isolate'] not in retained:
sys.stderr.write('Type isolate ' + qc_dict['type_isolate'] + ' not found in isolates after QC; check '
'name of type isolate and QC options\n')
sys.exit(1)
return retained
def fitKmerCurve(pairwise, klist, jacobian):
"""Fit the function :math:`pr = (1-a)(1-c)^k`
Supply ``jacobian = -np.hstack((np.ones((klist.shape[0], 1)), klist.reshape(-1, 1)))``
Args:
pairwise (numpy.array)
Proportion of shared k-mers at k-mer values in klist
klist (list)
k-mer sizes used
jacobian (numpy.array)
Should be set as above (set once to try and save memory)
Returns:
transformed_params (numpy.array)
Column with core and accessory distance
"""
# curve fit pr = (1-a)(1-c)^k
# log pr = log(1-a) + k*log(1-c)
# a = p[0]; c = p[1] (will flip on return)
try:
distFit = optimize.least_squares(fun=lambda p, x, y: y - (p[0] + p[1] * x),
x0=[0.0, -0.01],
jac=lambda p, x, y: jacobian,
args=(klist, np.log(pairwise)),
bounds=([-np.inf, -np.inf], [0, 0]))
transformed_params = 1 - np.exp(distFit.x)
except ValueError as e:
sys.stderr.write("Fitting k-mer curve failed: " + format(e) +
"\nWith mash input " +
np.array2string(pairwise, precision=4, separator=',',suppress_small=True) +
"\nCheck for low quality input genomes\n")
exit(0)
# Return core, accessory
return(np.flipud(transformed_params))
```
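To make the model fitted by `fitKmerCurve` concrete, the sketch below applies the same least-squares fit of pr = (1-a)(1-c)^k to noiseless synthetic proportions of shared k-mers and recovers the core and accessory distances. The k-mer lengths and the "true" distances are illustrative assumptions, not PopPUNK defaults.
```python
# Synthetic fit of pr = (1 - a)(1 - c)^k, following fitKmerCurve above.
import numpy as np
from scipy import optimize

klist = np.array([15, 17, 19, 21, 23, 25, 27, 29], dtype=float)
core_true, acc_true = 0.02, 0.15                          # illustrative values
pairwise = (1 - acc_true) * (1 - core_true) ** klist      # proportion of shared k-mers

# log pr = log(1 - a) + k * log(1 - c); Jacobian as supplied to fitKmerCurve
jacobian = -np.hstack((np.ones((klist.shape[0], 1)), klist.reshape(-1, 1)))
fit = optimize.least_squares(fun=lambda p, x, y: y - (p[0] + p[1] * x),
                             x0=[0.0, -0.01],
                             jac=lambda p, x, y: jacobian,
                             args=(klist, np.log(pairwise)),
                             bounds=([-np.inf, -np.inf], [0, 0]))
acc_est, core_est = 1 - np.exp(fit.x)                     # flipped on return, as above
print(round(core_est, 4), round(acc_est, 4))              # ~0.02 ~0.15
```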
#### File: PopPUNK/PopPUNK/utils.py
```python
import os
import sys
# additional
import pickle
import subprocess
from collections import defaultdict
from itertools import chain
from tempfile import mkstemp
from functools import partial
import contextlib
import poppunk_refine
import numpy as np
import pandas as pd
import h5py
try:
import cudf
import rmm
import cupy
from numba import cuda
gpu_lib = True
except ImportError as e:
gpu_lib = False
import pp_sketchlib
def setGtThreads(threads):
import graph_tool.all as gt
# Check on parallelisation of graph-tools
if gt.openmp_enabled():
gt.openmp_set_num_threads(threads)
sys.stderr.write('\nGraph-tools OpenMP parallelisation enabled:')
sys.stderr.write(' with ' + str(gt.openmp_get_num_threads()) + ' threads\n')
# thanks to Laurent LAPORTE on SO
@contextlib.contextmanager
def set_env(**environ):
"""
Temporarily set the process environment variables.
>>> with set_env(PLUGINS_DIR=u'test/plugins'):
... "PLUGINS_DIR" in os.environ
True
>>> "PLUGINS_DIR" in os.environ
False
"""
old_environ = dict(os.environ)
os.environ.update(environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
# Use partials to set up slightly different function calls between
# both possible backends
def setupDBFuncs(args, min_count, qc_dict):
"""Wraps common database access functions from sketchlib and mash,
to try and make their API more similar
Args:
args (argparse.opts)
Parsed command lines options
min_count (int)
Minimum k-mer count for reads
qc_dict (dict)
Table of parameters for QC function
Returns:
dbFuncs (dict)
Functions with consistent arguments to use as the database API
"""
from .sketchlib import checkSketchlibVersion
from .sketchlib import createDatabaseDir
from .sketchlib import joinDBs
from .sketchlib import constructDatabase as constructDatabaseSketchlib
from .sketchlib import queryDatabase as queryDatabaseSketchlib
from .sketchlib import readDBParams
from .sketchlib import getSeqsInDb
backend = "sketchlib"
version = checkSketchlibVersion()
constructDatabase = partial(constructDatabaseSketchlib,
strand_preserved = args.strand_preserved,
min_count = args.min_kmer_count,
use_exact = args.exact_count,
qc_dict = qc_dict,
use_gpu = args.gpu_sketch,
deviceid = args.deviceid)
queryDatabase = partial(queryDatabaseSketchlib,
use_gpu = args.gpu_dist,
deviceid = args.deviceid)
# Dict of DB access functions for assign_query (which is out of scope)
dbFuncs = {'createDatabaseDir': createDatabaseDir,
'joinDBs': joinDBs,
'constructDatabase': constructDatabase,
'queryDatabase': queryDatabase,
'readDBParams': readDBParams,
'getSeqsInDb': getSeqsInDb,
'backend': backend,
'backend_version': version
}
return dbFuncs
def storePickle(rlist, qlist, self, X, pklName):
"""Saves core and accessory distances in a .npy file, names in a .pkl
Called during ``--create-db``
Args:
rlist (list)
List of reference sequence names (for :func:`~iterDistRows`)
qlist (list)
List of query sequence names (for :func:`~iterDistRows`)
self (bool)
Whether an all-vs-all self DB (for :func:`~iterDistRows`)
X (numpy.array)
n x 2 array of core and accessory distances
pklName (str)
Prefix for output files
"""
with open(pklName + ".pkl", 'wb') as pickle_file:
pickle.dump([rlist, qlist, self], pickle_file)
np.save(pklName + ".npy", X)
def readPickle(pklName, enforce_self=False, distances=True):
"""Loads core and accessory distances saved by :func:`~storePickle`
Called during ``--fit-model``
Args:
pklName (str)
Prefix for saved files
enforce_self (bool)
Error if self == False
[default = False]
distances (bool)
Read the distance matrix
[default = True]
Returns:
rlist (list)
List of reference sequence names (for :func:`~iterDistRows`)
qlist (list)
List of query sequence names (for :func:`~iterDistRows`)
self (bool)
Whether an all-vs-all self DB (for :func:`~iterDistRows`)
X (numpy.array)
n x 2 array of core and accessory distances
"""
with open(pklName + ".pkl", 'rb') as pickle_file:
rlist, qlist, self = pickle.load(pickle_file)
if enforce_self and not self:
sys.stderr.write("Old distances " + pklName + ".npy not complete\n")
sys.stderr.exit(1)
if distances:
X = np.load(pklName + ".npy")
else:
X = None
return rlist, qlist, self, X
def iterDistRows(refSeqs, querySeqs, self=True):
"""Gets the ref and query ID for each row of the distance matrix
Returns an iterable with ref and query ID pairs by row.
Args:
refSeqs (list)
List of reference sequence names.
querySeqs (list)
List of query sequence names.
self (bool)
Whether a self-comparison, used when constructing a database.
Requires refSeqs == querySeqs
Default is True
Returns:
ref, query (str, str)
Iterable of tuples with ref and query names for each distMat row.
"""
if self:
if refSeqs != querySeqs:
raise RuntimeError('refSeqs must equal querySeqs for db building (self = true)')
for i, ref in enumerate(refSeqs):
for j in range(i + 1, len(refSeqs)):
yield(refSeqs[j], ref)
else:
for query in querySeqs:
for ref in refSeqs:
yield(ref, query)
def listDistInts(refSeqs, querySeqs, self=True):
"""Gets the ref and query ID for each row of the distance matrix
Returns an iterable with ref and query ID pairs by row.
Args:
refSeqs (list)
List of reference sequence names.
querySeqs (list)
List of query sequence names.
self (bool)
Whether a self-comparison, used when constructing a database.
Requires refSeqs == querySeqs
Default is True
Returns:
ref, query (int, int)
Iterable of tuples with ref and query indices for each distMat row.
"""
num_ref = len(refSeqs)
num_query = len(querySeqs)
if self:
if refSeqs != querySeqs:
raise RuntimeError('refSeqs must equal querySeqs for db building (self = true)')
for i in range(num_ref):
for j in range(i + 1, num_ref):
yield(j, i)
else:
comparisons = [(0,0)] * (len(refSeqs) * len(querySeqs))
for i in range(num_query):
for j in range(num_ref):
yield(j, i)
return comparisons
def qcDistMat(distMat, refList, queryList, ref_db, prefix, qc_dict):
"""Checks distance matrix for outliers.
Args:
distMat (np.array)
Core and accessory distances
refList (list)
Reference labels
queryList (list)
Query labels (or refList if self)
ref_db (str)
Prefix of reference database
prefix (str)
Prefix of output files
qc_dict (dict)
Dict of QC options
Returns:
seq_names_passing (list)
List of isolates passing QC distance filters
distMat ([n,2] numpy ndarray)
Filtered long form distance matrix
"""
# avoid circular import
from .prune_db import prune_distance_matrix
from .sketchlib import removeFromDB
from .sketchlib import pickTypeIsolate
# Create overall list of sequences
if refList == queryList:
seq_names_passing = refList
else:
seq_names_passing = refList + queryList
# Sequences to remove
to_prune = []
# Create output directory if it does not exist already
if not os.path.isdir(prefix):
try:
os.makedirs(prefix)
except OSError:
sys.stderr.write("Cannot create output directory " + prefix + "\n")
sys.exit(1)
# Pick type isolate if not supplied
if qc_dict['type_isolate'] is None:
qc_dict['type_isolate'] = pickTypeIsolate(ref_db, seq_names_passing)
sys.stderr.write('Selected type isolate for distance QC is ' + qc_dict['type_isolate'] + '\n')
# First check with numpy, which is quicker than iterating over everything
#long_distance_rows = np.where([(distMat[:, 0] > qc_dict['max_pi_dist']) | (distMat[:, 1] > qc_dict['max_a_dist'])])[1].tolist()
long_distance_rows = np.where([(distMat[:, 0] > qc_dict['max_pi_dist']) | (distMat[:, 1] > qc_dict['max_a_dist'])],0,1)[0].tolist()
long_edges = poppunk_refine.generateTuples(long_distance_rows,
0,
self = (refList == queryList),
num_ref = len(refList),
int_offset = 0)
if len(long_edges) > 0:
# Prune sequences based on reference sequence
for (s,t) in long_edges:
if seq_names_passing[s] == qc_dict['type_isolate']:
to_prune.append(seq_names_passing[t])
elif seq_names_passing[t] == qc_dict['type_isolate']:
to_prune.append(seq_names_passing[s])
# prune based on distance from reference if provided
if qc_dict['qc_filter'] == 'stop' and len(to_prune) > 0:
sys.stderr.write('Outlier distances exceed QC thresholds; prune sequences or raise thresholds\n')
sys.stderr.write('Problem distances involved sequences ' + ';'.join(to_prune) + '\n')
sys.exit(1)
elif qc_dict['qc_filter'] == 'prune' and len(to_prune) > 0:
if qc_dict['type_isolate'] is None:
sys.stderr.write('Distances exceeded QC thresholds but no reference isolate supplied\n')
sys.stderr.write('Problem distances involved sequences ' + ';'.join(to_prune) + '\n')
sys.exit(1)
else:
# Remove sketches
db_name = ref_db + '/' + os.path.basename(ref_db) + '.h5'
filtered_db_name = prefix + '/' + 'filtered.' + os.path.basename(prefix) + '.h5'
removeFromDB(db_name,
filtered_db_name,
to_prune,
full_names = True)
os.rename(filtered_db_name, db_name)
# Remove from distance matrix
seq_names_passing, distMat = prune_distance_matrix(seq_names_passing,
to_prune,
distMat,
prefix + "/" + os.path.basename(prefix) + ".dists")
# Remove from reflist
sys.stderr.write('Pruned from the database after failing distance QC: ' + ';'.join(to_prune) + '\n')
else:
storePickle(seq_names_passing, seq_names_passing, True, distMat, prefix + "/" + os.path.basename(prefix) + ".dists")
return seq_names_passing, distMat
def readIsolateTypeFromCsv(clustCSV, mode = 'clusters', return_dict = False):
"""Read cluster definitions from CSV file.
Args:
clustCSV (str)
File name of CSV with isolate assignments
mode (str)
Which type of columns to select: 'clusters', 'lineages' or 'external'
return_dict (bool)
If True, return a dict with sample->cluster instead
of sets
Returns:
clusters (dict)
Dictionary of cluster assignments (keys are cluster names, values are
sets containing samples in the cluster). Or if return_dict is set keys
are sample names, values are cluster assignments.
"""
# data structures
if return_dict:
clusters = defaultdict(dict)
else:
clusters = {}
# read CSV
clustersCsv = pd.read_csv(clustCSV, index_col = 0, quotechar='"')
# select relevant columns according to mode
if mode == 'clusters':
type_columns = [n for n,col in enumerate(clustersCsv.columns) if ('Cluster' in col)]
elif mode == 'lineages':
type_columns = [n for n,col in enumerate(clustersCsv.columns) if ('Rank_' in col or 'overall' in col)]
elif mode == 'external':
if len(clustersCsv.columns) == 1:
type_columns = [0]
elif len(clustersCsv.columns) > 1:
type_columns = range((len(clustersCsv.columns)-1))
else:
sys.stderr.write('Unknown CSV reading mode: ' + mode + '\n')
sys.exit(1)
# read file
for row in clustersCsv.itertuples():
for cls_idx in type_columns:
cluster_name = clustersCsv.columns[cls_idx]
cluster_name = cluster_name.replace('__autocolour','')
if return_dict:
clusters[cluster_name][str(row.Index)] = str(row[cls_idx + 1])
else:
if cluster_name not in clusters.keys():
clusters[cluster_name] = defaultdict(set)
clusters[cluster_name][str(row[cls_idx + 1])].add(row.Index)
# return data structure
return clusters
def joinClusterDicts(d1, d2):
"""Join two dictionaries returned by :func:`~readIsolateTypeFromCsv` with
return_dict = True. Useful for concatenating ref and query assignments
Args:
d1 (dict of dicts)
First dictionary to concat
d2 (dict of dicts)
Second dictionary to concat
Returns:
d1 (dict of dicts)
d1 with d2 appended
"""
if d1.keys() != d2.keys():
sys.stderr.write("Cluster columns not compatible\n")
sys.exit(1)
for column in d1.keys():
# Combine dicts: https://stackoverflow.com/a/15936211
d1[column] = \
dict(chain.from_iterable(d.items() for d in (d1[column], d2[column])))
return d1
def update_distance_matrices(refList, distMat, queryList = None, query_ref_distMat = None,
query_query_distMat = None, threads = 1):
"""Convert distances from long form (1 matrix with n_comparisons rows and 2 columns)
to a square form (2 NxN matrices), with merging of query distances if necessary.
Args:
refList (list)
List of references
distMat (numpy.array)
Two column long form list of core and accessory distances
for pairwise comparisons between reference db sequences
queryList (list)
List of queries
query_ref_distMat (numpy.array)
Two column long form list of core and accessory distances
for pairwise comparisons between queries and reference db
sequences
query_query_distMat (numpy.array)
Two column long form list of core and accessory distances
for pairwise comparisons between query sequences
threads (int)
Number of threads to use
Returns:
seqLabels (list)
Combined list of reference and query sequences
coreMat (numpy.array)
NxN array of core distances for N sequences
accMat (numpy.array)
NxN array of accessory distances for N sequences
"""
seqLabels = refList
if queryList is not None:
seqLabels = seqLabels + queryList
if queryList == None:
coreMat = pp_sketchlib.longToSquare(distMat[:, [0]], threads)
accMat = pp_sketchlib.longToSquare(distMat[:, [1]], threads)
else:
coreMat = pp_sketchlib.longToSquareMulti(distMat[:, [0]],
query_ref_distMat[:, [0]],
query_query_distMat[:, [0]],
threads)
accMat = pp_sketchlib.longToSquareMulti(distMat[:, [1]],
query_ref_distMat[:, [1]],
query_query_distMat[:, [1]],
threads)
# return outputs
return seqLabels, coreMat, accMat
def readRfile(rFile, oneSeq=False):
"""Reads in files for sketching. Names and sequence, tab separated
Args:
rFile (str)
File with locations of assembly files to be sketched
oneSeq (bool)
Return only the first sequence listed, rather than a list
(used with mash)
Returns:
names (list)
Array of sequence names
sequences (list of lists)
Array of sequence files
"""
names = []
sequences = []
with open(rFile, 'r') as refFile:
for refLine in refFile:
rFields = refLine.rstrip().split("\t")
if len(rFields) < 2:
sys.stderr.write("Input reference list is misformatted\n"
"Must contain sample name and file, tab separated\n")
sys.exit(1)
if "/" in rFields[0]:
sys.stderr.write("Sample names may not contain slashes\n")
sys.exit(1)
names.append(rFields[0])
sample_files = []
for sequence in rFields[1:]:
sample_files.append(sequence)
# Take first of sequence list
if oneSeq:
if len(sample_files) > 1:
sys.stderr.write("Multiple sequence found for " + rFields[0] +
". Only using first\n")
sequences.append(sample_files[0])
else:
sequences.append(sample_files)
# Process names to ensure compatibility with downstream software
names = isolateNameToLabel(names)
if len(set(names)) != len(names):
seen = set()
dupes = set(x for x in names if x in seen or seen.add(x))
sys.stderr.write("Input contains duplicate names! All names must be unique\n")
sys.stderr.write("Non-unique names are " + ",".join(dupes) + "\n")
sys.exit(1)
# Names are sorted on return
# We have had issues (though they should be fixed) with unordered input
# not matching the database. This should help simplify things
list_iterable = zip(names, sequences)
sorted_names = sorted(list_iterable)
tuples = zip(*sorted_names)
names, sequences = [list(r_tuple) for r_tuple in tuples]
return (names, sequences)
def isolateNameToLabel(names):
"""Function to process isolate names to labels
appropriate for visualisation.
Args:
names (list)
List of isolate names.
Returns:
labels (list)
List of isolate labels.
"""
# useful to have as a function in case we
# want to remove certain characters
labels = [name.split('/')[-1].replace('.','_').replace(':','').replace('(','_').replace(')','_') \
for name in names]
return labels
def createOverallLineage(rank_list, lineage_clusters):
# process multirank lineages
overall_lineages = {'Rank_' + str(rank):{} for rank in rank_list}
overall_lineages['overall'] = {}
isolate_list = lineage_clusters[rank_list[0]].keys()
for isolate in isolate_list:
overall_lineage = None
for rank in rank_list:
overall_lineages['Rank_' + str(rank)][isolate] = lineage_clusters[rank][isolate]
if overall_lineage is None:
overall_lineage = str(lineage_clusters[rank][isolate])
else:
overall_lineage = overall_lineage + '-' + str(lineage_clusters[rank][isolate])
overall_lineages['overall'][isolate] = overall_lineage
return overall_lineages
def transformLine(s, mean0, mean1):
"""Return x and y co-ordinates for traversing along a line between mean0 and mean1, parameterised by
a single scalar distance s from the start point mean0.
Args:
s (float)
Distance along line from mean0
mean0 (numpy.array)
Start position of line (x0, y0)
mean1 (numpy.array)
End position of line (x1, y1)
Returns:
x (float)
The Cartesian x-coordinate
y (float)
The Cartesian y-coordinate
"""
dx = mean1[0] - mean0[0]
dy = mean1[1] - mean0[1]
ds = np.sqrt(dx**2 + dy**2)
x = mean0[0] + s * (dx / ds)
y = mean0[1] + s * (dy / ds)
return np.array([x, y])
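# Worked example (illustrative numbers): with mean0 = (0, 0) and mean1 = (3, 4)
# the line has length 5, so transformLine(1, mean0, mean1) returns the point one
# unit along it, approximately (0.6, 0.8).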
def decisionBoundary(intercept, gradient):
"""Returns the co-ordinates where the triangle the decision boundary forms
meets the x- and y-axes.
Args:
intercept (numpy.array)
Cartesian co-ordinates of point along line (:func:`~transformLine`)
which intercepts the boundary
gradient (float)
Gradient of the line
Returns:
x (float)
The x-axis intercept
y (float)
The y-axis intercept
"""
x = intercept[0] + intercept[1] * gradient
y = intercept[1] + intercept[0] / gradient
return(x, y)
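# Worked example (illustrative numbers): for an intercept point (0.1, 0.2) and
# gradient 2, the boundary meets the axes at x = 0.1 + 0.2 * 2 = 0.5 and
# y = 0.2 + 0.1 / 2 = 0.25, so decisionBoundary(np.array([0.1, 0.2]), 2)
# returns approximately (0.5, 0.25).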
def check_and_set_gpu(use_gpu, gpu_lib, quit_on_fail = False):
"""Check GPU libraries can be loaded and set managed memory.
Args:
use_gpu (bool)
Whether GPU packages have been requested
gpu_lib (bool)
Whether GPU packages are available
Returns:
use_gpu (bool)
Whether GPU packages can be used
"""
# load CUDA libraries
if use_gpu and not gpu_lib:
if quit_on_fail:
sys.stderr.write('Unable to load GPU libraries; exiting\n')
sys.exit(1)
else:
sys.stderr.write('Unable to load GPU libraries; using CPU libraries '
'instead\n')
use_gpu = False
# Set memory management for large networks
if use_gpu:
rmm.reinitialize(managed_memory=True)
cudf.set_allocator("managed")
if "cupy" in sys.modules:
cupy.cuda.set_allocator(rmm.rmm_cupy_allocator)
if "cuda" in sys.modules:
cuda.set_memory_manager(rmm.RMMNumbaManager)
assert(rmm.is_initialized())
return use_gpu
def read_rlist_from_distance_pickle(fn, allow_non_self = True):
"""Return the list of reference sequences from a distance pickle.
Args:
fn (str)
Name of distance pickle
allow_non_self (bool)
Whether non-self distance datasets are permissible
Returns:
rlist (list)
List of reference sequence names
"""
with open(fn, 'rb') as pickle_file:
rlist, qlist, self = pickle.load(pickle_file)
if not allow_non_self and not self:
sys.stderr.write("Thi analysis requires an all-v-all"
" distance dataset\n")
sys.exit(1)
return rlist
```
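The long-form distance matrices saved by `storePickle` are ordered as produced by `iterDistRows`; the self-contained snippet below reproduces that ordering for toy sample names, which is handy when mapping rows of the n x 2 distance array back to sample pairs.
```python
# Row ordering of the long-form distance matrix, mirroring iterDistRows above.
refs = ["A", "B", "C"]

# self = True: lower-triangle order, later sample first, n*(n-1)/2 rows in total
self_rows = [(refs[j], ref) for i, ref in enumerate(refs)
             for j in range(i + 1, len(refs))]
assert self_rows == [("B", "A"), ("C", "A"), ("C", "B")]

# self = False: query-major, reference-minor
queries = ["Q1", "Q2"]
query_rows = [(ref, query) for query in queries for ref in refs]
assert query_rows == [("A", "Q1"), ("B", "Q1"), ("C", "Q1"),
                      ("A", "Q2"), ("B", "Q2"), ("C", "Q2")]
```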
#### File: PopPUNK/scripts/poppunk_easy_run.py
```python
import sys
import argparse
import subprocess
# command line parsing
def get_options():
parser = argparse.ArgumentParser(description='Easy run mode (create + dbscan + refine)',
prog='easy_run')
# input options
parser.add_argument('--r-files', help='List of sequence names and files (as for --r-files)')
parser.add_argument('--output', help='Prefix for output files')
parser.add_argument('--analysis-args', help="Other arguments to pass to poppunk. e.g. "
"'--min-k 13 --max-k 29'")
parser.add_argument('--viz', help = "Run visualisation of output", default = False, action = "store_true")
parser.add_argument('--viz-args', help = "Options to use for visualisation")
parser.add_argument('--poppunk-exe', help="Location of poppunk executable. Use "
"'python poppunk-runner.py' to run from source tree")
parser.add_argument('--viz-exe', help = "Location of poppunk_visualisation executable")
return parser.parse_args()
# main code
if __name__ == "__main__":
# Check input ok
args = get_options()
if args.poppunk_exe is None:
poppunk = "poppunk"
else:
poppunk = args.poppunk_exe
if args.analysis_args is None:
pp_args = ""
else:
pp_args = args.analysis_args
sys.stderr.write("Running --create-db\n")
create_db_cmd = poppunk + " --create-db --r-files " + args.r_files + " --output " + args.output + " " + pp_args
sys.stderr.write(create_db_cmd + "\n")
subprocess.run(create_db_cmd, shell=True, check=True)
sys.stderr.write("Running --fit-model dbscan\n")
dbscan_cmd = poppunk + " --fit-model dbscan --ref-db " + args.output + " --output " + args.output + " " + pp_args
sys.stderr.write(dbscan_cmd + "\n")
subprocess.run(dbscan_cmd, shell=True, check=True)
sys.stderr.write("Running --fit-model refine\n")
refine_cmd = poppunk + " --fit-model refine --ref-db " + args.output + " --output " + args.output + " " + pp_args
sys.stderr.write(refine_cmd + "\n")
subprocess.run(refine_cmd, shell=True, check=True)
if args.viz:
if args.viz_exe is None:
poppunk_viz = "poppunk_visualise"
else:
poppunk_viz = args.viz_exe
if args.viz_args is None:
viz_extra = ""
else:
viz_extra = args.viz_args
viz_cmd = poppunk_viz + " --ref-db " + args.output + " --output " + args.output + " " + viz_extra
sys.stderr.write(viz_cmd + "\n")
subprocess.run(viz_cmd, shell=True, check=True)
sys.exit(0)
```
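The wrapper above assembles shell strings and runs them with `shell=True`, which lets user-supplied `--analysis-args` be split by the shell. For fixed arguments, an argument-list call avoids quoting issues; a sketch with placeholder file names (not files from this repository):
```python
# Equivalent --create-db invocation with an argument list; paths are placeholders.
import subprocess

cmd = ["poppunk", "--create-db", "--r-files", "rfiles.txt", "--output", "example_db"]
subprocess.run(cmd, check=True)
```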
#### File: PopPUNK/scripts/poppunk_extract_components.py
```python
import sys
from scipy.stats import rankdata
import argparse
# command line parsing
def get_options():
parser = argparse.ArgumentParser(description='Extract graphml files of each component for '
'individual visualisation',
prog='extract_components')
# input options
parser.add_argument('--graph', help='Input graph file (.gt)')
parser.add_argument('--output', help='Prefix for output files')
return parser.parse_args()
# main code
if __name__ == "__main__":
# Check input ok
args = get_options()
# open stored graph
import graph_tool.all as gt
G = gt.load_graph(args.graph)
# extract individual components
component_assignments, component_frequencies = gt.label_components(G)
component_frequency_ranks = len(component_frequencies) - rankdata(component_frequencies, method = 'ordinal').astype(int)
sys.stderr.write("Writing " + str(len(component_frequencies)) + " components "
"in reverse order of size\n")
# extract as GraphView objects and print
for component_index in range(len(component_frequency_ranks)):
component_gv = gt.GraphView(G, vfilt = component_assignments.a == component_index)
component_G = gt.Graph(component_gv, prune = True)
component_fn = args.output + ".component_" + str(component_frequency_ranks[component_index]) + ".graphml"
component_G.save(component_fn, fmt = 'graphml')
sys.exit(0)
```
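The component numbering above ranks components by size in reverse, so the largest component receives suffix 0. A toy check with made-up component sizes:
```python
# Reverse-size ranking used when naming the .graphml outputs above.
import numpy as np
from scipy.stats import rankdata

component_frequencies = np.array([5, 2, 8, 1])   # made-up component sizes
ranks = len(component_frequencies) - rankdata(component_frequencies, method='ordinal').astype(int)
print(ranks)  # [1 2 0 3]: the component with 8 members becomes component_0
```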
#### File: johnlees/PopPUNK/setup.py
```python
from setuptools import setup, find_packages
from codecs import open
from os import path
import os, sys
import re
import io
import platform
import subprocess
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.install import install
from distutils.version import LooseVersion
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable,
'-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON']
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='poppunk',
version=find_version("PopPUNK/__init__.py"),
description='PopPUNK (POPulation Partitioning Using Nucleotide Kmers)',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/johnlees/PopPUNK',
author='<NAME> and <NAME>',
author_email='<EMAIL>',
license='Apache Software License',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.8',
],
python_requires='>=3.8.0',
keywords='bacteria genomics population-genetics k-mer',
packages=['PopPUNK'],
entry_points={
"console_scripts": [
'poppunk = PopPUNK.__main__:main',
'poppunk_assign = PopPUNK.assign:main',
'poppunk_visualise = PopPUNK.visualise:main',
'poppunk_mst = PopPUNK.sparse_mst:main',
'poppunk_prune = PopPUNK.prune_db:main',
'poppunk_references = PopPUNK.reference_pick:main',
'poppunk_tsne = PopPUNK.tsne:main',
'poppunk_info = PopPUNK.info:main'
]
},
scripts=['scripts/poppunk_calculate_rand_indices.py',
'scripts/poppunk_extract_components.py',
'scripts/poppunk_calculate_silhouette.py',
'scripts/poppunk_batch_mst.py',
'scripts/poppunk_extract_distances.py',
'scripts/poppunk_add_weights.py',
'scripts/poppunk_easy_run.py',
'scripts/poppunk_pickle_fix.py'],
ext_modules=[CMakeExtension('poppunk_refine')],
test_suite="test",
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
include_package_data=True,
package_data={'': ['PopPUNK/data/*.json.gz']}
)
```
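`find_version` above pulls `__version__` out of `PopPUNK/__init__.py` with a regular expression; a minimal reproduction on an in-memory string (the version number here is hypothetical):
```python
# Same regex as find_version, applied to a hypothetical file body.
import re

version_file = '__version__ = "2.4.0"\n'
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
print(match.group(1))  # 2.4.0
```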
#### File: PopPUNK/test/test-refine.py
```python
import os, sys
import numpy as np
from math import sqrt
# testing without install
#sys.path.insert(0, '../build/lib.macosx-10.9-x86_64-3.8')
import poppunk_refine
# Original PopPUNK function (with some improvements)
def withinBoundary(dists, x_max, y_max, slope=2):
boundary_test = np.ones((dists.shape[0]))
for row in range(boundary_test.size):
if slope == 2:
in_tri = dists[row, 1] * x_max + dists[row, 0] * y_max - x_max * y_max
elif slope == 0:
in_tri = dists[row, 0] - x_max
elif slope == 1:
in_tri = dists[row, 1] - y_max
if abs(in_tri) < np.finfo(np.float32).eps:
boundary_test[row] = 0
elif in_tri < 0:
boundary_test[row] = -1
return(boundary_test)
def check_tuples(t1, t2):
for t in t1:
if t not in t2:
raise RuntimeError("Results don't match")
def iter_tuples(assign_results, n_samples):
tuple_list = []
idx = 0
for i in range(n_samples):
for j in range(i + 1, n_samples):
if assign_results[idx] == -1:
tuple_list.append((i, j))
idx += 1
return tuple_list
def check_res(res, expected):
if (not np.all(res == expected)):
print(res)
print(expected)
raise RuntimeError("Results don't match")
# assigning
x = np.arange(0, 1, 0.1, dtype=np.float32)
y = np.arange(0, 1, 0.1, dtype=np.float32)
xv, yv = np.meshgrid(x, y)
distMat = np.hstack((xv.reshape(-1,1), yv.reshape(-1,1)))
assign0 = poppunk_refine.assignThreshold(distMat, 0, 0.5, 0.5, 2)
assign1 = poppunk_refine.assignThreshold(distMat, 1, 0.5, 0.5, 2)
assign2 = poppunk_refine.assignThreshold(distMat, 2, 0.5, 0.5, 2)
assign0_res = withinBoundary(distMat, 0.5, 0.5, 0)
assign1_res = withinBoundary(distMat, 0.5, 0.5, 1)
assign2_res = withinBoundary(distMat, 0.5, 0.5, 2)
check_res(assign0, assign0_res)
check_res(assign1, assign1_res)
check_res(assign2, assign2_res)
# Check results when returned as tuple
samples = 100
distMat = np.random.rand(int(0.5 * samples * (samples - 1)), 2)
distMat = np.array(distMat, dtype = np.float32)
assign0_res = withinBoundary(distMat, 0.5, 0.5, 0)
assign0_edge_res = iter_tuples(assign0_res, samples)
check_tuples(assign0_edge_res,
poppunk_refine.generateTuples([int(x) for x in assign0_res], -1))
assign1_edge_res = iter_tuples(withinBoundary(distMat, 0.5, 0.5, 1), samples)
assign2_edge_res = iter_tuples(withinBoundary(distMat, 0.5, 0.5, 2), samples)
assign0_edges = poppunk_refine.edgeThreshold(distMat, 0, 0.5, 0.5)
assign1_edges = poppunk_refine.edgeThreshold(distMat, 1, 0.5, 0.5)
assign2_edges = poppunk_refine.edgeThreshold(distMat, 2, 0.5, 0.5)
check_tuples(assign0_edges, assign0_edge_res)
check_tuples(assign1_edges, assign1_edge_res)
check_tuples(assign2_edges, assign2_edge_res)
# move boundary 1D
# example is symmetrical at points (0.1, 0.1); (0.2, 0.2); (0.3, 0.3)
offsets = [x * sqrt(2) for x in [-0.1, 0.0, 0.1]]
i_vec, j_vec, idx_vec = poppunk_refine.thresholdIterate1D(distMat, offsets, 2, 0.2, 0.2, 0.3, 0.3)
sketchlib_i = []
sketchlib_j = []
for offset_idx, offset in enumerate(offsets):
for i, j, idx in zip(i_vec, j_vec, idx_vec):
if idx > offset_idx:
break
elif idx == offset_idx:
sketchlib_i.append(i)
sketchlib_j.append(j)
py_i = []
py_j = []
xmax = 0.4 + (2 * (offset/sqrt(2)))
assign = poppunk_refine.assignThreshold(distMat, 2, xmax, xmax, 1)
dist_idx = 0
for i in range(samples):
for j in range(i + 1, samples):
if assign[dist_idx] <= 0:
py_i.append(i)
py_j.append(j)
dist_idx += 1
if set(zip(py_i, py_j)) != set(zip(sketchlib_i, sketchlib_j)):
raise RuntimeError("Threshold 1D iterate mismatch at offset " + str(offset))
# move boundary 2D
# example is for boundaries (0.1, 0.2); (0.2, 0.2); (0.3, 0.2)
offsets = [0.1, 0.2, 0.3]
y_max = 0.2
i_vec, j_vec, idx_vec = poppunk_refine.thresholdIterate2D(distMat, offsets, y_max)
sketchlib_i = []
sketchlib_j = []
for offset_idx, offset in enumerate(offsets):
for i, j, idx in zip(i_vec, j_vec, idx_vec):
if idx > offset_idx:
break
elif idx == offset_idx:
sketchlib_i.append(i)
sketchlib_j.append(j)
py_i = []
py_j = []
assign = poppunk_refine.assignThreshold(distMat, 2, offset, y_max, 1)
dist_idx = 0
for i in range(samples):
for j in range(i + 1, samples):
if assign[dist_idx] <= 0:
py_i.append(i)
py_j.append(j)
dist_idx += 1
if set(zip(py_i, py_j)) != set(zip(sketchlib_i, sketchlib_j)):
raise RuntimeError("Threshold 2D iterate mismatch at offset " + str(offset))
```
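The reference implementation `withinBoundary` above classifies each point by the sign of `in_tri`. A small numeric check of the `slope=2` case, using the same x_max = y_max = 0.5 as the tests:
```python
# Sign of the boundary test for slope=2, as computed in withinBoundary above.
def side_of_boundary(x, y, x_max=0.5, y_max=0.5):
    return y * x_max + x * y_max - x_max * y_max

print(side_of_boundary(0.1, 0.1))    # ~ -0.15 -> assigned -1 (within the boundary)
print(side_of_boundary(0.25, 0.25))  #    0.0  -> assigned  0 (on the boundary)
print(side_of_boundary(0.4, 0.4))    # ~  0.15 -> assigned +1 (outside)
```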
#### File: PopPUNK/test/test-web.py
```python
import os
import sys
import subprocess
from shutil import copyfile
# testing without install
#sys.path.insert(0, '..')
from PopPUNK.assign import assign_query
from PopPUNK.web import default_options, summarise_clusters, get_colours, api, graphml_to_json
from PopPUNK.utils import setupDBFuncs
from PopPUNK.visualise import generate_visualisations
def main():
# Copy and move args and sketch files into example dirs
copyfile("web_args.txt", "example_db/args.txt")
copyfile("example_viz/example_viz_core_NJ.nwk", "example_viz/example_viz.nwk")
# Test the output of the PopPUNk-web upload route for incorrect data types
sys.stderr.write('\nTesting assign for PopPUNK-web\n')
with open("json_sketch.txt", "r") as s:
sketch = s.read()
species = "Listeria monocytogenes"
species_db = "example_db"
outdir = "example_api"
if not os.path.exists(outdir):
os.mkdir(outdir)
args = default_options(species_db)
qc_dict = {'run_qc': False }
print("Weights: " + str(args.assign.graph_weights))
dbFuncs = setupDBFuncs(args.assign, args.assign.min_kmer_count, qc_dict)
ClusterResult = assign_query(dbFuncs,
args.assign.ref_db,
args.assign.q_files,
outdir,
qc_dict,
args.assign.update_db,
args.assign.write_references,
args.assign.distances,
args.assign.threads,
args.assign.overwrite,
args.assign.plot_fit,
False, #args.assign.graph_weights,
args.assign.max_a_dist,
args.assign.max_pi_dist,
args.assign.type_isolate,
args.assign.model_dir,
args.assign.strand_preserved,
args.assign.previous_clustering,
args.assign.external_clustering,
args.assign.core_only,
args.assign.accessory_only,
args.assign.gpu_sketch,
args.assign.gpu_dist,
args.assign.gpu_graph,
args.assign.deviceid,
args.assign.web,
sketch,
args.assign.save_partial_query_graph)
query, query_prevalence, clusters, prevalences, alias_dict, to_include = \
summarise_clusters(outdir, species, species_db)
colours = get_colours(query, clusters)
url = api(query, "example_viz")
sys.stderr.write('PopPUNK-web assign test successful\n')
print("Done clustering")
# Test generate_visualisations() for PopPUNK-web
sys.stderr.write('\nTesting visualisations for PopPUNK-web\n')
if len(to_include) < 3:
args.visualise.microreact = False
generate_visualisations(outdir,
species_db,
os.path.join(outdir, outdir + '.dists'), # distances,
None,
args.visualise.threads,
outdir,
args.visualise.gpu_dist,
args.visualise.deviceid,
args.visualise.external_clustering,
args.visualise.microreact,
args.visualise.phandango,
args.visualise.grapetree,
args.visualise.cytoscape,
args.visualise.perplexity,
args.visualise.strand_preserved,
outdir + "/include.txt",
species_db,
species_db + "/" + os.path.basename(species_db) + "_clusters.csv",
args.visualise.previous_query_clustering,
None, # previous MST
None, # previous distances,
outdir + "/" + os.path.basename(outdir) + "_graph.gt",
args.visualise.gpu_graph,
args.visualise.info_csv,
args.visualise.rapidnj,
args.visualise.tree,
args.visualise.mst_distances,
args.visualise.overwrite,
args.visualise.core_only,
args.visualise.accessory_only,
args.visualise.display_cluster,
web=True)
networkJson = graphml_to_json(outdir)
if len(to_include) >= 3:
with open(os.path.join(outdir, os.path.basename(outdir) + "_core_NJ.nwk"), "r") as p:
phylogeny = p.read()
else:
phylogeny = "A tree cannot be built with fewer than 3 samples."
# ensure web api outputs are of the correct type
if not isinstance(species, str):
raise TypeError('"Species" datatype is incorrect, should be string.\n')
if not (isinstance(query_prevalence, float) or isinstance(query_prevalence, int)):
raise TypeError('"query_prevalence" datatype is incorrect, should be float/integer.\n')
if not isinstance(query, str):
raise TypeError('"query" datatype is incorrect, should be string.\n')
if not isinstance(clusters, list) and not isinstance(clusters[0], str):
raise TypeError('"clusters" datatype is incorrect, should be list of strings.\n')
if not isinstance(prevalences, list) and not (isinstance(prevalences[0], float) or isinstance(prevalences[0], int)):
raise TypeError('"prevalences" datatype is incorrect, should be list of floats/integers.\n')
if not isinstance(colours, list) and not isinstance(colours[0], str):
raise TypeError('"colours" datatype is incorrect, should be list of strings.\n')
if not isinstance(url, str):
raise TypeError('"url" datatype is incorrect, should be string.\n')
if not isinstance(alias_dict, dict):
raise TypeError('"alias_dict" datatype is incorrect, should be dictionary.\n')
if not isinstance(outdir, str):
raise TypeError('"outdir" datatype is incorrect, should be string.\n')
if not isinstance(networkJson, dict):
raise TypeError('"networkJson" datatype is incorrect, should be dict.\n')
if not isinstance(phylogeny, str):
raise TypeError('"phylogeny" datatype is incorrect, should be str.\n')
sys.stderr.write('\nAPI tests complete\n')
if __name__ == "__main__":
main()
``` |
{
"source": "johnlees/pyseer",
"score": 3
} |
#### File: pyseer/tests/cmdscale_test.py
```python
import unittest
import numpy as np
import pandas as pd
from pyseer.cmdscale import cmdscale
class TestCommandScale(unittest.TestCase):
input_file = 'tests/distances_smaller.tsv.gz'
Y_file = 'tests/cmdscale.Y.txt.gz'
e_file = 'tests/cmdscale.e.txt.gz'
input_data = pd.read_table(input_file, index_col=0)
Y = np.loadtxt(Y_file)
e = np.loadtxt(e_file)
# ugly hack to take into account minor
# precision problems between systems
Y = Y[:, :10]
e = e[:10]
def test_cmdscale(self):
Y, e = cmdscale(self.input_data)
# ugly hack to take into account minor
# precision problems between systems
Y = Y[:, :10]
e = e[:10]
self.assertTrue(abs((self.Y - Y).max()) < 1E-15)
self.assertTrue(abs((self.e - e).max()) < 1E-15)
if __name__ == '__main__':
unittest.main()
```
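`cmdscale` performs classical multidimensional scaling on the distance matrix loaded above. The generic algorithm double-centres the squared distances and takes an eigendecomposition; the sketch below shows that idea on a toy distance matrix and is not pyseer's implementation, which may differ in eigenvalue filtering and sign conventions.
```python
# Generic classical MDS sketch (not the pyseer code): double-centre, then eigendecompose.
import numpy as np

def classical_mds(D):
    n = D.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n        # centring matrix
    B = -0.5 * J @ (D ** 2) @ J                # double-centred squared distances
    evals, evecs = np.linalg.eigh(B)
    order = np.argsort(evals)[::-1]            # largest eigenvalues first
    evals, evecs = evals[order], evecs[:, order]
    keep = evals > 0
    Y = evecs[:, keep] * np.sqrt(evals[keep])  # embedding coordinates
    return Y, evals

# Pairwise distances of three collinear points at positions 0, 1 and 3
D = np.array([[0., 1., 3.],
              [1., 0., 2.],
              [3., 2., 0.]])
Y, e = classical_mds(D)
print(np.round(np.abs(Y[:, 0]), 3))  # [1.333 0.333 1.667], positions up to shift and sign
```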
#### File: pyseer/tests/lmm_test.py
```python
import os
import warnings
import unittest
import numpy as np
import pandas as pd
from pyseer.lmm import initialise_lmm
from pyseer.lmm import fit_lmm
from pyseer.lmm import fit_lmm_block
from pyseer.classes import LMM
DATA_DIR = 'tests'
P_BINARY = os.path.join(DATA_DIR, 'subset.pheno')
S = os.path.join(DATA_DIR, 'similarity_subset.tsv.gz')
COV = os.path.join(DATA_DIR, 'covariates.txt')
C = os.path.join(DATA_DIR, 'lmm_cache.npz')
K = os.path.join(DATA_DIR, 'unit_tests_data', 'k.txt')
M = os.path.join(DATA_DIR, 'unit_tests_data', 'm.txt')
def eq_lmm(s1, s2):
"""Test whether two LMM objects are the same"""
diff = set()
for p in ['kmer', 'pattern',
'kstrains', 'nkstrains', 'notes',
'prefilter', 'filter']:
x = getattr(s1, p)
y = getattr(s2, p)
if x != y:
diff.add(p)
for p in ['af', 'prep', 'pvalue',
'kbeta', 'bse', 'frac_h2']:
x = getattr(s1, p)
y = getattr(s2, p)
if not np.isfinite(x) and not np.isfinite(y):
continue
if np.isfinite(x) and not np.isfinite(y):
diff.add(p)
if np.isfinite(y) and not np.isfinite(x):
diff.add(p)
if abs(x - y) > 1E-7:
diff.add(p)
if s1.max_lineage is not None and s2.max_lineage is not None:
p = 'max_lineage'
x = getattr(s1, p)
y = getattr(s2, p)
if not np.isfinite(x) and not np.isfinite(y):
pass
else:
if np.isfinite(x) and not np.isfinite(y):
diff.add(p)
if np.isfinite(y) and not np.isfinite(x):
diff.add(p)
if x != y:
diff.add(p)
elif s1.max_lineage is None and s2.max_lineage is None:
pass
else:
diff.add('max_lineage')
return diff
class TestInitialiseLmm(unittest.TestCase):
def test_initialise_lmm(self):
p = pd.read_table(P_BINARY,
index_col=0)['binary']
cov = pd.DataFrame([])
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None)
self.assertEqual(x.shape[0], 50)
self.assertAlmostEqual(y.findH2()['nLL'][0],
35.7033778)
self.assertAlmostEqual(z, 0.0)
# covariates
cov = pd.read_table(COV, index_col=0,
header=None)
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None)
self.assertEqual(x.shape[0], 50)
self.assertAlmostEqual(y.findH2()['nLL'][0],
34.55403861)
self.assertAlmostEqual(z, 0.0)
# sample names not matching
b = pd.Series(np.random.random(100),
index=['test_%d' % x for x in range(100)])
with warnings.catch_warnings():
warnings.simplefilter('ignore')
x, y, z = initialise_lmm(b, cov, S,
lmm_cache_in=None,
lmm_cache_out=None)
self.assertEqual(x.shape[0], 0)
self.assertTrue(not np.isfinite(y.findH2()['nLL'][0]))
self.assertAlmostEqual(z, 0.0)
# save cache
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=C)
# load cache
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=C,
lmm_cache_out=None)
self.assertEqual(x.shape[0], 50)
self.assertAlmostEqual(y.findH2()['nLL'][0],
34.55403861)
self.assertAlmostEqual(z, 0.0)
# different sizes
b = pd.Series(np.random.random(10),
index=['test_%d' % x for x in range(10)])
with self.assertRaises(SystemExit) as cm:
initialise_lmm(b, cov, S,
lmm_cache_in=C,
lmm_cache_out=None)
self.assertEqual(cm.exception.code, 1)
# matching lineage samples
cov = pd.DataFrame([])
s = pd.read_table(S, index_col=0)
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None,
lineage_samples=s.index)
# non-matching lineage samples
with self.assertRaises(SystemExit) as cm:
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None,
lineage_samples=s.index[:-1])
class TestFitLmm(unittest.TestCase):
def test_fit_lmm(self):
p = pd.read_table(P_BINARY,
index_col=0)['binary']
cov = pd.DataFrame([])
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None)
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
False, 1, 1)
test_results = [LMM('variant', 'pattern', 0.2,
0.28252075514059294,
0.2920532220978148,
0.1513687600644123,
0.1420853593711293,
0.1519818397711344,
None,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), False, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# af filtering
var = LMM('variant',
None,
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
False, 1, 1)
test_results = [LMM('variant', None, 0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(['af-filter']), True, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# bad-chisq
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
bad_k = np.array([1]*5 + [0]*(p.shape[0]-5))
variants = [(var, p.values, bad_k),]
variant_mat = bad_k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
False, 1, 1)
test_results = [LMM('variant', 'pattern', 0.2,
0.2544505826463333,
0.263519965703956,
0.2666666666666663,
0.2357022603955158,
0.16116459280507586,
None,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(['bad-chisq']), False, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# pre-filtering
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
False, 0.05, 1)
test_results = [LMM('variant', 'pattern', 0.2,
0.28252075514059294,
np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(['pre-filtering-failed']), True, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# lrt-filtering
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
False, 1, 0.05)
test_results = [LMM('variant', 'pattern', 0.2,
0.28252075514059294,
0.2920532220978148,
np.nan, np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(['lrt-filtering-failed']), False, True),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# lineage fit
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
m = np.loadtxt(M)[:p.shape[0]]
results = fit_lmm(y, z,
variants, variant_mat,
True, m, cov,
False, 1, 1)
test_results = [LMM('variant', 'pattern', 0.2,
0.28252075514059294,
0.2920532220978148,
0.1513687600644123,
0.1420853593711293,
0.1519818397711344,
0,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), False, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# lineage fit + covariates
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
m = np.loadtxt(M)[:p.shape[0]]
cov = pd.read_table(COV, index_col=0, header=None).values
results = fit_lmm(y, z,
variants, variant_mat,
True, m, cov,
False, 1, 1)
test_results = [LMM('variant', 'pattern', 0.2,
0.28252075514059294,
0.2920532220978148,
0.1513687600644123,
0.1420853593711293,
0.1519818397711344,
0,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), False, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# continuous phenotype
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
True, 1, 1)
test_results = [LMM('variant', 'pattern', 0.2,
0.0,
0.2920532220978148,
0.1513687600644123,
0.1420853593711293,
0.1519818397711344,
None,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), False, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
class TestFitLmmBlock(unittest.TestCase):
def test_fit_lmm_block(self):
p = pd.read_table(P_BINARY,
index_col=0)['binary']
cov = pd.DataFrame([])
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None)
k = np.loadtxt(K)[:p.shape[0]]
variant_mat = k.reshape(-1, 1)
result = fit_lmm_block(y, z, variant_mat)
self.assertAlmostEqual(result['beta'][0],
0.15136876)
self.assertAlmostEqual(result['bse'][0],
0.14208536)
self.assertAlmostEqual(result['frac_h2'][0],
0.15198184)
self.assertAlmostEqual(result['p_values'][0],
0.29205322)
# impossibly high h2
with self.assertRaises(KeyError):
fit_lmm_block(y, 1, variant_mat)
# shape mismatch
with self.assertRaises(AssertionError):
fit_lmm_block(y, z, variant_mat[:10])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johnlev/coderdojo-curriculum",
"score": 4
} |
#### File: coderdojo-curriculum/Week1/car_activity.py
```python
from cs1lib import *
from math import *
# Constants
ROAD_WIDTH = 50
CAR_WIDTH = 15
CAR_HEIGHT = 25
WIDTH = 600
HEIGHT = 600
BLOCK_HEIGHT = HEIGHT / 4
LINE_HEIGHT = 10
LINE_WIDTH = 3
ANIMATION_LENGTH = 60
PARKING_LOT_HEIGHT = 40
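# HEADING_MAP maps the car's current compass heading ('n', 'e', 's', 'w') and a turn
# direction ('l' for left, 'r' for right) to the resulting heading, e.g. facing north
# and turning left yields west: HEADING_MAP['n']['l'] == 'w'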
HEADING_MAP = {'n': {'l': 'w', 'r': 'e'}, 'e': {'l': 'n', 'r': 's'}, 's': {'l': 'e', 'r': 'w'}, 'w': {'l': 's', 'r': 'n'}}
# Variables
action_queue = []
car_on = False
car_pos = (WIDTH / 2 + ROAD_WIDTH / 8 + CAR_WIDTH / 2, HEIGHT + CAR_HEIGHT / 2)
last_car_pos = car_pos
last_turn = None
car_heading = 'n'
car_angle = 0
last_car_angle = car_angle
current_action = 0
frame = -30
def draw():
"""
:return:
"""
set_framerate(ANIMATION_LENGTH / 2)
set_clear_color(0, 1, 0)
clear()
set_fill_color(0.2, 0.2, 0.2)
draw_rectangle(0, HEIGHT + PARKING_LOT_HEIGHT, WIDTH, -PARKING_LOT_HEIGHT)
draw_road(WIDTH / 2 - ROAD_WIDTH / 2, HEIGHT)
draw_intersection(WIDTH / 2 - ROAD_WIDTH / 2, HEIGHT - BLOCK_HEIGHT)
draw_road(WIDTH / 2 - ROAD_WIDTH / 2, HEIGHT - BLOCK_HEIGHT, rotated=True)
draw_road(WIDTH / 2 + ROAD_WIDTH / 2 + BLOCK_HEIGHT, HEIGHT - BLOCK_HEIGHT, rotated=True)
draw_intersection(WIDTH / 2 + ROAD_WIDTH / 2 + BLOCK_HEIGHT, HEIGHT - BLOCK_HEIGHT)
draw_intersection(WIDTH / 2 - BLOCK_HEIGHT - ROAD_WIDTH * 3 / 2, HEIGHT - BLOCK_HEIGHT)
draw_road(WIDTH / 2 - BLOCK_HEIGHT - ROAD_WIDTH * 3 / 2, HEIGHT - BLOCK_HEIGHT - ROAD_WIDTH)
draw_intersection(WIDTH / 2 - BLOCK_HEIGHT - ROAD_WIDTH * 3 / 2, HEIGHT - BLOCK_HEIGHT * 2 - ROAD_WIDTH)
set_fill_color(0, 0, 1)
draw_circle(WIDTH / 2 - BLOCK_HEIGHT - ROAD_WIDTH * 3 / 2 + ROAD_WIDTH / 2, HEIGHT - BLOCK_HEIGHT * 2 - ROAD_WIDTH - ROAD_WIDTH / 2, ROAD_WIDTH / 3)
draw_road(WIDTH / 2 + BLOCK_HEIGHT + ROAD_WIDTH / 2, HEIGHT - BLOCK_HEIGHT - ROAD_WIDTH)
draw_intersection(WIDTH / 2 + BLOCK_HEIGHT + ROAD_WIDTH / 2, HEIGHT - BLOCK_HEIGHT * 2 - ROAD_WIDTH)
draw_road(WIDTH / 2 + BLOCK_HEIGHT + ROAD_WIDTH / 2, HEIGHT - BLOCK_HEIGHT * 2 - ROAD_WIDTH, rotated=True)
draw_intersection(WIDTH / 2 - ROAD_WIDTH / 2, HEIGHT - BLOCK_HEIGHT * 2 - ROAD_WIDTH)
draw_road(WIDTH / 2 - ROAD_WIDTH / 2, HEIGHT - BLOCK_HEIGHT * 2 - ROAD_WIDTH * 2)
draw_intersection(WIDTH / 2 - ROAD_WIDTH / 2, HEIGHT - BLOCK_HEIGHT * 3 - ROAD_WIDTH * 2)
draw_road(WIDTH / 2 - ROAD_WIDTH / 2, HEIGHT - BLOCK_HEIGHT * 3 - ROAD_WIDTH * 2, rotated=True)
draw_intersection(WIDTH / 2 - BLOCK_HEIGHT - ROAD_WIDTH * 3 / 2, HEIGHT - BLOCK_HEIGHT * 3 - ROAD_WIDTH * 2)
draw_road(WIDTH / 2 - BLOCK_HEIGHT - ROAD_WIDTH * 3 / 2, HEIGHT - BLOCK_HEIGHT * 2 - ROAD_WIDTH * 2)
global current_action
if current_action < len(action_queue):
global frame
global car_pos
global action
global last_car_pos
global last_car_angle
global car_heading
global car_angle
global last_turn
if frame < 10 or frame > ANIMATION_LENGTH - 10:
# Do nothing
pass
else:
# Move the car in the correct direction
action = action_queue[current_action]
if action == 'f':
if car_heading == 'n' or car_heading == 's':
distance = abs(last_car_pos[1] - (BLOCK_HEIGHT + ROAD_WIDTH) * (-1 if car_heading == 's' else 1) - car_pos[1])
car_pos = (car_pos[0], car_pos[1] - distance / (ANIMATION_LENGTH - 9 - frame) * (-1 if car_heading == 's' else 1))
if car_heading == 'e' or car_heading == 'w':
distance = abs(last_car_pos[0] + (BLOCK_HEIGHT + ROAD_WIDTH) * (-1 if car_heading == 'w' else 1) - car_pos[0])
car_pos = (car_pos[0] + distance / (ANIMATION_LENGTH - 9 - frame) * (-1 if car_heading == 'w' else 1), car_pos[1])
else:
distance = abs((last_car_angle - 90 * (-1 if action == 'r' else 1)) - car_angle)
car_angle = car_angle - distance / (ANIMATION_LENGTH - 9 - frame) * (-1 if action == 'r' else 1)
if frame >= ANIMATION_LENGTH:
last_car_pos = car_pos
last_car_angle = car_angle
if action_queue[current_action] != 'f':
car_heading = HEADING_MAP[car_heading][action_queue[current_action]]
last_turn = action_queue[current_action]
frame = 0
current_action += 1
else:
frame += 1
draw_car(car_pos, car_angle)
def draw_road(x, y, rotated=False):
set_fill_color(0.2, 0.2, 0.2)
set_stroke_color(0, 0, 0, 0)
if not rotated:
draw_rectangle(x, y, ROAD_WIDTH, -BLOCK_HEIGHT)
for line in range(0, int(BLOCK_HEIGHT / LINE_HEIGHT / 2) + 1):
set_fill_color(1, 1, 0)
draw_rectangle(x + ROAD_WIDTH / 2 - LINE_WIDTH / 2, y - line * LINE_HEIGHT * 2, LINE_WIDTH, -LINE_HEIGHT)
else:
draw_rectangle(x, y, -BLOCK_HEIGHT, -ROAD_WIDTH)
for line in range(0, int(BLOCK_HEIGHT / LINE_HEIGHT / 2) + 1):
set_fill_color(1, 1, 0)
draw_rectangle(x - line * LINE_HEIGHT * 2, y - ROAD_WIDTH / 2 + LINE_WIDTH / 2, -LINE_HEIGHT, -LINE_WIDTH)
def draw_intersection(x, y):
set_fill_color(0.2, 0.2, 0.2)
set_stroke_color(0, 0, 0, 0)
draw_rectangle(x, y, ROAD_WIDTH, -ROAD_WIDTH)
def draw_car(pos, angle):
x, y = pos
points = []
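# rotate each corner of the car rectangle about the car's centre by `angle` degrees
# (standard 2D rotation: x' = x*cos(a) - y*sin(a), y' = x*sin(a) + y*cos(a)),
# then translate the rotated corner to the car's position (x, y)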
for i, j in [(1, 1), (1, -1), (-1, -1), (-1, 1)]:
tempX = j * CAR_WIDTH / 2
tempY = i * CAR_HEIGHT / 2
rotatedX = tempX * cos(radians(angle)) - tempY * sin(radians(angle))
rotatedY = tempX * sin(radians(angle)) + tempY * cos(radians(angle))
x1 = rotatedX + x
y1 = rotatedY + y
points.append((x1, y1))
set_fill_color(1, 0, 1)
draw_polygon(points)
def start():
global car_on
car_on = True
def move_forward():
if car_on:
action_queue.append('f')
def turn_left():
if car_on:
action_queue.append('l')
def turn_right():
if car_on:
action_queue.append('r')
def park():
global car_on
car_on = False
def done():
start_graphics(draw, width=WIDTH, height=HEIGHT + PARKING_LOT_HEIGHT)
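# Illustrative example of how a student script might drive the car (assumed usage, not
# part of the original file; each call queues an action that is animated once done()
# starts the graphics loop):
#
# start()
# move_forward()
# turn_left()
# move_forward()
# park()
# done()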
if __name__ == "__main__":
done()
```
#### File: coderdojo-curriculum/Week4/drawing_activity.py
```python
from cs1lib import *
WIDTH = 800
HEIGHT = 550
imageFiles = ['res/bush.png', 'res/darkClouds.png', 'res/flowers.png', 'res/grass.png', 'res/lightClouds.png',
'res/mountains.png', 'res/road.png', 'res/sun.png', 'res/sky.png']
images = {}
imageQueue = []
def load_images():
for filename in imageFiles:
images[filename[4:-4]] = load_image(filename)
def draw_bush():
imageQueue.append("bush")
def draw_dark_clouds():
imageQueue.append("darkClouds")
def draw_flowers():
imageQueue.append("flowers")
def draw_grass():
imageQueue.append("grass")
def draw_light_clouds():
imageQueue.append("lightClouds")
def draw_mountains():
imageQueue.append("mountains")
def draw_road():
imageQueue.append("road")
def draw_sun():
imageQueue.append("sun")
def draw_sky():
imageQueue.append("sky")
def main_draw():
clear()
for imageKey in imageQueue:
draw_image(images[imageKey], 0, 0)
def done():
load_images()
start_graphics(main_draw, width=WIDTH, height=HEIGHT)
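# Illustrative ordering example (assumed usage, not part of the original file): main_draw
# paints the queued images in the order they were requested, so background layers should
# be queued first and foreground elements last, e.g.:
#
# draw_sky()
# draw_sun()
# draw_mountains()
# draw_grass()
# draw_road()
# done()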
```
#### File: Week5/AdvancedActivities/advancedactivity1.py
```python
import random
from Week5.AdvancedActivities.Ball import Ball
from cs1lib import *
WIDTH = 800
HEIGHT = 550
GRAVITY = 1
ballList = []
def add_ball(x=None, y=None, rad=None, r=None, g=None, b=None):
x = random.uniform(0, WIDTH) if x is None else x
y = random.uniform(50, 300) if y is None else y
rad = random.uniform(10, 30) if rad is None else rad
r = random.uniform(0, 1) if r is None else r
g = random.uniform(0, 1) if g is None else g
b = random.uniform(0, 1) if b is None else b
ballList.append(Ball(x, y, rad, r, g, b))
def main_draw():
clear()
for ball in ballList:
ball.draw(GRAVITY, HEIGHT)
"""
Activity Description:
Now that we know that the order in which we draw objects on a canvas matters, we will focus on the movement of
such drawings on screen, otherwise known as animation. Animation is a sequence of still images displayed quickly
in series; changing the position or shape of an object from one image to the next conveys the motion of that
object. In this activity, we will change the position of a ball to make it look as though it is bouncing. In
reality, we will be calculating the position of the ball from its speed at each point in time. Within the Ball.py
file, add the necessary code and functionality to allow the ball to bounce.
"""
for i in range(0, 10):
add_ball()
start_graphics(main_draw, width=WIDTH, height=HEIGHT, framerate=60)
```
#### File: coderdojo-curriculum/Week7/Cat.py
```python
from Week7.Animal import Animal
class Cat(Animal):
def __init__(self, color, age, numOfWhiskers, breed, favFood, collar = None):
Animal.__init__(self, color, age, "Cat")
self.numOfWhiskers = numOfWhiskers
self.breed = breed
self.favFood = favFood
self.noise = "Meow"
self.collar = collar
def makeSound(self):
print("{}!".format(self.noise))
def getInfo(self):
Animal.getInfo(self)
print("I am a {} and my favorite food is a {}".format(self.breed, self.favFood))
print("I have about {} whiskers.".format(self.numOfWhiskers))
if (self.collar):
print("Oh, there's a collar attached. It says...")
self.collar.getInfo()
print()
```
#### File: coderdojo-curriculum/Week7/Dog.py
```python
from Week7.Animal import Animal
class Dog(Animal):
def __init__(self, color, age, tricks, breed, favToy, collar = None):
Animal.__init__(self, color, age, "Dog")
self.tricks = tricks
self.breed = breed
self.favToy = favToy
self.noise = "Bark"
self.collar = collar
def makeSound(self):
print("{}!".format(self.noise))
def getInfo(self):
Animal.getInfo(self)
print("I am a {} and my favorite toy is a {}".format(self.breed, self.favToy))
print("I can do the following tricks:")
for trick in self.tricks:
print(trick)
if(self.collar):
print("Oh, there's a collar attached. It says...")
self.collar.getInfo()
print()
```
#### File: coderdojo-curriculum/Week8/App.py
```python
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from Week8.Sorter_activity import Sorter
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'Sorting Visualizer'
self.width = 550
self.height = 625
self.fragDimension = 50
self.pixmap = QPixmap("res/catimage.jpg")
self.sorter = Sorter(self, self.pixmap.width(), self.pixmap.height(), self.fragDimension)
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setFixedSize(self.width, self.height)
self.centerFrame()
self.shuffleButton = QPushButton('Shuffle', self)
self.shuffleButton.setToolTip('Shuffle the pixels of the Image')
self.shuffleButton.move(25, 550)
self.shuffleButton.clicked.connect(self.sorter.shuffleImage)
self.bubbleSortButton = QPushButton('BubbleSort', self)
self.bubbleSortButton.setToolTip('Start a bubble sort of the image')
self.bubbleSortButton.move(125, 550)
self.bubbleSortButton.clicked.connect(self.sorter.bubblesort)
self.mergeSortButton = QPushButton('MergeSort', self)
self.mergeSortButton.setToolTip('Start a merge sort of the image')
self.mergeSortButton.move(225, 550)
self.mergeSortButton.clicked.connect(self.sorter.mergesort)
self.quickSortButton = QPushButton('QuickSort', self)
self.quickSortButton.setToolTip('Start a quick sort of the image')
self.quickSortButton.move(325, 550)
self.quickSortButton.clicked.connect(self.sorter.quicksort)
self.radixSortButton = QPushButton('RadixSort', self)
self.radixSortButton.setToolTip('Start a radix sort of the image')
self.radixSortButton.move(425, 550)
self.radixSortButton.clicked.connect(self.sorter.radixsort)
self.getImageButton = QPushButton('Get new image', self)
self.getImageButton.setToolTip('Get a new image to sort')
self.getImageButton.move(225, 585)
self.getImageButton.clicked.connect(self.getNewImage)
self.disableAllButtons()
self.enableShuffleButton()
def centerFrame(self):
resolution = QDesktopWidget().screenGeometry()
self.move((resolution.width() / 2) - (self.frameSize().width() / 2),
(resolution.height() / 2) - (self.frameSize().height() / 2))
def enableShuffleButton(self):
self.shuffleButton.setEnabled(True)
self.getImageButton.setEnabled(True)
def enableSortingButtons(self):
self.bubbleSortButton.setEnabled(True)
self.mergeSortButton.setEnabled(True)
self.quickSortButton.setEnabled(True)
self.radixSortButton.setEnabled(True)
def disableAllButtons(self):
self.shuffleButton.setEnabled(False)
self.bubbleSortButton.setEnabled(False)
self.mergeSortButton.setEnabled(False)
self.quickSortButton.setEnabled(False)
self.radixSortButton.setEnabled(False)
self.getImageButton.setEnabled(False)
def getNewImage(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self, "Get image file", "res/",
"Image Files (*.png *.jpg *.jpeg)", options=options)
if fileName:
newPixmap = QPixmap(fileName)
if(newPixmap.width() != 500 or newPixmap.height() != 500):
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("Incorrect Image Size")
msg.setInformativeText("Please choose an image file that is 500 x 500.\nYour image size: {} x {}".format(newPixmap.width(), newPixmap.height()))
msg.setWindowTitle("Warning: Incorrect Image Size")
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
else:
self.pixmap = newPixmap
self.update()
# An event-based paint method that is called each time App.update() is called.
# This redraws the shuffled image on the window based on the current state of self.sorter.
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
fragWidth = self.pixmap.width() / self.fragDimension
fragHeight = self.pixmap.height() / self.fragDimension
for y in range(self.fragDimension):
for x in range(self.fragDimension):
srcTuple = self.sorter.getSubpixmapTuple(x + y * self.fragDimension)
painter.drawPixmap(25 + (x * fragWidth), 25 + (y * fragHeight), fragWidth, fragHeight, self.pixmap,
srcTuple[0], srcTuple[1], srcTuple[2], srcTuple[3])
painter.end()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
ex.show()
sys.exit(app.exec_())
``` |
{
"source": "johnlfield/DayCent_regional",
"score": 2
} |
#### File: johnlfield/DayCent_regional/DayCent_regional_postprocess-farm-level.py
```python
import constants as con
from IPython.display import Image, display
import json
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import os
import pandas as pd
import plotly.express as px
import sys
from urllib.request import urlopen
# ## Load runtable
# Individual DayCent strata are specified via a .csv format "runtable" file, which contains the following information:
# * unique identifier (strata_no)
# * ID for DayCent spin-up archive (runno)
# * SSURGO soil map unit ID (mukey_int)
# * NARR climate grid ID (gridx & gridy)
# * county FIPS code (fips)
# * DayCent-format schedule file to simulate (sch_file)
# * latitude of the county centroid, used to set perennial grass phenology (latitude)
# * for simulations on abandoned agricultural land, year of peak ag land extent (peak_year)
# * land area represented by that strata (tot_ha)
# The code below loads the relevant runtable to a Pandas dataframe.
runtable = "eastern_US_runtable_incl81.csv"
run_df = pd.read_csv(runtable, skiprows=[1]) # skip SQL datatype row
run_df
# ## Load DayCent results
# Raw DayCent model output is spread across two files:
# * .lis files contain information related to per-area biomass harvest and soil carbon
# * year_summary.out contains per-area trace gas emissions
#
# Reading these DayCent results data directly from the NREL network via Pulse Secure is ideal for avoiding clutter on my local machine. However, that is only practical for smaller datasets; multi-GB data takes hours to load. I had also tried to develop code in this notebook to sub-set the large DayCent raw results files on the NREL network (see code_scraps.py), to minimize the volume of data being read over the network or downloaded locally. However, it seems that reading network data over Pulse Secure is the bottle-neck (not necessarily loading it into a large Pandas DataFrame), so running that sub-setting routine from this notebook on my own machine was similarly slow.
#
# I eventually found it quicker & more flexible to download the big raw data files locally via a shell Secure Copy command (with Pulse Secure DISabled), and process via a more normal linear workflow. The scp step takes approximately 2 min per GB. After that, reading the local data to memory is reasonable (~2 min), and merging & filtering steps are quick enough (usually ~1 min each) that there is no need to change my workflow (designed more for code length & clarity than memory management). Here's an example shell Secure Copy command, for reference:
# ```console
# scp <EMAIL>:/data/paustian/AFRI/simulations/results/2019-11-01,00.37__eastern_US_runtable_incl81__90__drought_sensitivity/year_summary.out /Users/johnfield/Desktop/2019-11-01,00.37__eastern_US_runtable_incl81__90__drought_sensitivity_year_summary.out
# ```
# specify simulation names for both baseline & drought tolerance results
base_sims = '2019-09-16,13.26__eastern_US_runtable_incl81__79__CBI_baseline'
drought_sims = '2019-11-01,00.37__eastern_US_runtable_incl81__90__drought_sensitivity'
# +
# # inspect head of raw data files over Pulse Secure
# results_path = '/Volumes/wcnr-network/Research/Paustian/AFRI/simulations/results/'
# base_fpath = os.path.join(results_path, base_sims, 'X.lis')
# drought_fpath = os.path.join(results_path, drought_sims, 'X.lis')
# # base python equivalent to a bash 'head' command
# with open(base_fpath) as myfile:
# head = [next(myfile) for x in range(5)]
# print(head)
# print()
# with open(drought_fpath) as myfile:
# head = [next(myfile) for x in range(5)]
# print(head)
# -
# First, we load and concatenate the .lis output for the relevant switchgrass variety scenarios.
# +
# %%time
results_path = '/Users/johnfield/Desktop/'
# .lis data import, skipping SQL datatype rows
base_lis_fpath = os.path.join(results_path, base_sims+'_X.lis')
base_lis_df = pd.read_csv(base_lis_fpath, skiprows=[1]) # 3.7 GB
base_lis_df['variety'] = 'base'
drought_lis_fpath = os.path.join(results_path, drought_sims+'_X.lis')
drought_lis_df = pd.read_csv(drought_lis_fpath, skiprows=[1]) # 3.7 GB
drought_lis_df['variety'] = 'drought_tol'
# concatenate scenario results
lis_df = pd.concat([base_lis_df, drought_lis_df], axis=0)
# drop dummy data, including placeholder 'crop' & 'land_type' columns, and blank rows at the end
# of .lis output for each simulation (time=2069)
lis_df.drop(columns=['crop', 'land_type'], inplace=True)
lis_df = lis_df[lis_df['time'] != 2069]
# -
# Then, we do the same for the year_summary.out results.
# +
# %%time
# year_summary.out data import, skipping SQL datatype rows
base_ys_fpath = os.path.join(results_path, base_sims+'_year_summary.out')
base_ys_df = pd.read_csv(base_ys_fpath, skiprows=[1]) # 3.7 GB
base_ys_df['variety'] = 'base'
drought_ys_fpath = os.path.join(results_path, drought_sims+'_year_summary.out')
drought_ys_df = pd.read_csv(drought_ys_fpath, skiprows=[1]) # 3.7 GB
drought_ys_df['variety'] = 'drought_tol'
# concatenate scenario results
ys_df = pd.concat([base_ys_df, drought_ys_df], axis=0)
# drop dummy placeholder 'crop' & 'land_type' columns
ys_df.drop(columns=['crop', 'land_type'], inplace=True)
# -
# With both the baseline and drought-tolerant scenarios loaded, check the merged results for consistency with the runtable
# +
# check no. of strata in merged results for consistency w/ runtable
print("Unique .lis strata count:", lis_df.strata_no.nunique())
print("Unique year_summary.out strata count:", ys_df.strata_no.nunique())
# +
# note that these abandoned land simulations begin in different years in different counties
lis_df.time.value_counts().sort_index()
# -
# # Analysis
# ## Merge .lis & .out data
# +
# %%time
# merge .lis & year_summary.out DataFrames
df = pd.merge(lis_df, ys_df, on=['strata_no', 'variety', 'time'])
# drop unneeded DayCent outputs
df = df[['strata_no', 'variety', 'time', 'crmvst',
'd_somsc', 'N2Oflux', 'strmac(2)', 'volpac', 'NOflux', 'CH4']]
df
# -
# ## Unit conversions
# Most DayCent outputs are in units of grams of carbon per meter squared (g C m-2), though some of the nitrogen flux results are reported on a per-hectare basis instead. The code below performs basic unit conversions to express the results in more familiar units of kg or Mg per hectare; because the 'base' and 'drought' results are concatenated in a single DataFrame, the same code processes both cases.
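# For reference, the conversion factors implied here follow from 1 g m-2 = 10 kg ha-1
# = 0.01 Mg ha-1 (a hectare is 10,000 m2), so the constants module is assumed to define
# roughly: con.g_m2_to_kg_ha = 10.0, con.g_m2_to_Mg_ha = 0.01, con.kg_ha_to_Mg_ha = 0.001.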
# +
# %%time
# drop all past period results
df = df[df['time'] >= 2020]
# unit conversions
df['yield_Mg_ha'] = ((df['crmvst'] * con.g_m2_to_Mg_ha) / con.C_concentration)
df['dSOC_MgC_ha'] = (df['d_somsc'] * con.g_m2_to_Mg_ha)
df['dN2ON_kgN_ha'] = (df['N2Oflux'] * con.g_m2_to_kg_ha)
df['iN2ON_kgN_ha'] = ((0.0075 * df['strmac(2)'] + 0.01 * df['volpac'] +
0.01 * df['NOflux']) * con.g_m2_to_kg_ha)
df['kgCH4_ox_ha'] = (df['CH4'] * con.g_m2_to_kg_ha)
df['ghg_MgCO2e_ha'] = (df['dSOC_MgC_ha'] * con.C_to_CO2 * -1.0) + \
((df['dN2ON_kgN_ha'] + df['iN2ON_kgN_ha']) *
con.kg_ha_to_Mg_ha * con.N_to_N2O * con.N2O_GWP100_AR5) + \
(df['kgCH4_ox_ha'] * con.kg_ha_to_Mg_ha * con.CH4_GWP100_AR5 * -1.0)
df['volit_kgN_ha'] = df['volpac'] * con.g_m2_to_kg_ha
df['leached_kgN_ha'] = df['strmac(2)'] * con.g_m2_to_kg_ha
# drop the original columns with non-standard unit data
df.drop(columns=['crmvst', 'd_somsc', 'N2Oflux',
'strmac(2)', 'volpac', 'NOflux', 'CH4'], inplace=True)
df
# -
# ## Area-weighted county aggregation
# +
# %%time
# re-associate FIPS codes and land areas with strata results
area_df = pd.merge(run_df[['strata_no', 'fips', 'tot_ha']], df, on='strata_no')
area_df
# -
# First, we combine the total production and impacts associated with each stratum in each year by multiplying the per-hectare results by the area represented by that stratum:
# +
# %%time
# calculate area totals & drop obsolete data
area_tot_df = area_df.copy(deep=False)
columns = ['yield_Mg', 'dSOC_MgC', 'dN2ON_kgN', 'iN2ON_kgN',
'kgCH4_ox', 'ghg_MgCO2e', 'volit_kgN', 'leached_kgN']
for column in columns:
area_tot_df[column] = area_tot_df[column+'_ha'] * area_tot_df['tot_ha']
area_tot_df.drop(columns=[column+'_ha'], inplace=True)
area_tot_df
# -
# Then, we aggregate these results to the county scale, and divide by the total area simulated for each county to calculate area-weighted results:
# +
# %%time
# group results to annual county scale
retain_data = ['fips', 'variety', 'time', 'tot_ha', 'yield_Mg',
'dSOC_MgC', 'dN2ON_kgN', 'iN2ON_kgN',
'kgCH4_ox', 'ghg_MgCO2e', 'volit_kgN', 'leached_kgN']
annual_df = area_tot_df[retain_data].groupby(['fips', 'variety', 'time']).sum()
annual_df = annual_df.reset_index()
# divide by area & drop obsolete data
for column in ['dSOC_MgC', 'dN2ON_kgN', 'iN2ON_kgN',
'kgCH4_ox', 'ghg_MgCO2e', 'volit_kgN', 'leached_kgN']:
annual_df[column+'_ha'] = annual_df[column] / annual_df['tot_ha']
annual_df.drop(columns=[column], inplace=True)
# calculate per-area yields and total county production in mega-tonne (Mt) units
annual_df['yield_Mg_ha'] = annual_df['yield_Mg'] / annual_df['tot_ha']
annual_df['prod_Mt'] = annual_df['yield_Mg'] * 1.0e-6
annual_df.drop(columns=['yield_Mg'], inplace=True)
# calculate fraction of annual N application lost via leaching + volatilization
annual_df['leach_volit_frac'] = (annual_df['volit_kgN_ha'] +
annual_df['leached_kgN_ha']) / 75.0
annual_df
# -
# ## Time-averaging
# Finally, we aggregate over simulation years to get time-averaged county-scale results:
# +
county_df = annual_df.groupby(['fips', 'variety']).mean()
county_df.reset_index(inplace=True)
# add leading zeros to FIPS codes (otherwise this causes mapping failure)
county_df['fips'] = county_df['fips'].apply(lambda x: '{0:0>5}'.format(x))
# -
# # Base case maps
# +
# import shapefile of county boundaries
shapeurl = 'https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json'
with urlopen(shapeurl) as response:
counties = json.load(response)
# define lat/long/zoom factors appropriate for visualizing the Corn Belt
map_width = 500 # pixels
aspect_ratio = 0.64
map_zoom = 2.8
cen_lat = 38.5
cen_lon = -87.5
prod_lim = 1.0
prod_colors = 'deep'
# define standard plot limits across crops
yield_lims = [0, 25.0] # (Mg ha-1 y-1)
area_lims = [0, 1.0e5] # (ha)
prod_lims = [0, 1.0] # (Mt y-1)
# In interactive mapping mode, live Plotly maps maps are displayed. If false,
# static maps are saved in PNG & PDF formats, and static PNG map is displayed
interactive_map = False
def mapping(df, data_column, colorscale, label, kwargs={}):
my_map = px.choropleth_mapbox(df, geojson=counties, locations='fips',
color=data_column,
color_continuous_scale=colorscale,
mapbox_style='open-street-map',
zoom=map_zoom,
center={"lat": cen_lat, "lon": cen_lon},
labels={data_column: label},
**kwargs)
my_map.update_layout(width=map_width, height=map_width*aspect_ratio,
margin={"r":0,"t":0,"l":0,"b":0})
if interactive_map:
my_map.show()
else:
my_map.write_image(data_column + ".pdf")
my_map.write_image(data_column + ".png")
display(Image(filename=data_column + ".png"))
# -
mapping(county_df[county_df.variety == 'base'],
'tot_ha', 'Oranges', 'Mt y<sup>-1</sup>',
kwargs={"range_color": [0, 120e3]})
mapping(county_df[county_df.variety == 'base'],
'yield_Mg_ha', 'Greens', 'Mt y<sup>-1</sup>')
mapping(county_df[county_df.variety == 'base'],
'prod_Mt', 'Reds', 'Mt y<sup>-1</sup>',
kwargs={"range_color": [0.0, 2.0]})
# # Case study data
# Below we subset the full annual dataset (annual_df) to show three multi-county case study feedstock-sheds in IA, PA & TN, in order to look at yield variability (both farm-scale and inter-annual). These initial county lists are derived from Haley's analysis of the 2016 Billion Ton Study (looking specifically at combined switchgrass and Miscanthus production potential in the year 2040 at a biomass price of $80/ton). I took her list of high-production-density counties, and selected 4–5 contiguous ones for each case study.
#
# 
# ## Determine FIPS lists
# +
# define dictionary of case study counties by name
county_sets = {"IA": ['Ringgold', 'Taylor', 'Union', 'Adams', 'Adair'],
"TN": ['Maury', 'Marshall', 'Giles', 'Bedford', 'Moore'],
"PA": ['Adams', 'Cumberland', 'Franklin', 'York']}
# "KS": ['Cheyenne', 'Rawlins', 'Thomas', 'Sherman']
# +
# get fips codes for case study counties
all_fips_df = pd.read_csv('All_FIPS.csv')
all_fips_df.rename(columns={'FIPS': 'fips'}, inplace=True)
fips_sets = {}
for cs in county_sets:
fips_list = []
for county in county_sets[cs]:
fips = all_fips_df.loc[(all_fips_df.County == county) & (all_fips_df.ST == cs), 'fips'].item()
fips_list.append(fips)
fips_sets[cs] = fips_list
fips_sets
# +
# verify case study FIPS by mapping
map_df = pd.DataFrame(columns=('cs', 'fips', 'map_value'))
for i, cs in enumerate(fips_sets):
for fips in fips_sets[cs]:
map_df.loc[len(map_df)] = [cs, fips, i+1]
mapping(map_df, 'map_value', '', 'Case study')
# -
# ## Subset the annual data
# +
# extract timestamp for original simulations to use in case study file names
base_ts = base_sims.split('_')[0]
drought_ts = drought_sims.split('_')[0]
for CS in fips_sets:
# new DataFrame containing a single cycle of historic weather for the case study counties
cs_df = area_df[(area_df.fips.isin(fips_sets[CS]) & area_df.time.between(2020, 2050))]
# convert area yield rates to total county production in mega-tonne (Mt) units
cs_df['prod_Mt'] = cs_df['yield_Mg_ha'] * cs_df['tot_ha'] * 1.0e-6
# merge county names & ST abbreviation for readability
cs_df = pd.merge(all_fips_df[['fips', 'County', 'ST']], cs_df, on='fips')
cs_df.to_csv('{}_case_study_{}_{}.csv'.format(CS, base_ts, drought_ts), index=False)
# display a representative subsetted DataFrame
cs_df
# -
# # Case study visualization
##### Set up case study visualization to run one-at-a-time based on specified case study?
cs = 'IA'
cs_file_suffix = '_case_study_2019-09-16,13.26_2019-11-01,00.37.csv'
cs_df = pd.read_csv(cs+cs_file_suffix)
cs_df
# ## Plot variability
# new DataFrame grouping case study data by county & year
cs_time_df = cs_df[['County', 'time', 'prod_Mt']].groupby(['County', 'time']).sum()
cs_time_df
# plot annual total production across all counties
cs_time_df.unstack(level=0).plot(kind='bar', stacked=True)
plt.title("Inter-annual variability in total regional production")
plt.xlabel("simulation year")
plt.ylabel("total feedstock production (Mt)")
plt.legend(loc=3)
# +
# determine lowest & highest-yielding years
extrema_df = cs_time_df.reset_index()
del extrema_df['County']
extrema_df = extrema_df.groupby(['time']).sum()
extrema_df.sort_values(by=['prod_Mt'], inplace=True)
year_sort = list(extrema_df.index)
# extrema_years = (year_sort[0], year_sort[len(year_sort)//2], year_sort[-1])
extrema_years = (year_sort[0], year_sort[-1])
print(extrema_years)
# +
##### At this point, need to loop through 'base' and 'drought_tol' scenarios separately
fig, ax = plt.subplots(2, 1, sharex=True)
fig.suptitle('{} farm-level yield variability'.format(cs))
plt.xlabel("biomass yield (Ma ha-1)")
plt.legend(loc=3)
varieties = ['base', 'drought_tol']
for i, variety in enumerate(varieties):
hist_df = cs_df[(cs_df.time.isin(extrema_years)) &
(cs_df.variety == variety)][['strata_no', 'time', 'yield_Mg_ha']]
hist_df = hist_df.pivot(index='strata_no', columns='time', values='yield_Mg_ha')
hist_df.plot.hist(bins=40, alpha=0.5, ax=ax[i])
ax[i].set_title('{} variety'.format(variety), fontsize=10)
ax[i].set_ylabel("# strata", fontsize=10)
# + [markdown] heading_collapsed=true
# ## DayCent–BTS2016 comparison
# + hidden=true
# import BTS data
bts_df = pd.read_excel("Bioenergy KDF Billion Ton Report County Level Data_2040-JLF.xlsx", sheet_name='SG & M_PA')
bts_df.rename(columns={'Prod':'BTS_prod_Mg'}, inplace=True)
# compute DayCent total county production
county_df['daycent_prod_Mg'] = county_df['yield_Mg_ha'] * county_df['tot_ha']
# merge data and create scatterplot
comp_df = pd.merge(bts_df[['Fips', 'BTS_prod_Mg']],
county_df[['fips', 'daycent_prod_Mg']],
left_on='Fips', right_on='fips')
comp_df.plot.scatter(x='BTS_prod_Mg', y='daycent_prod_Mg')
# plt.plot([0, 1.2e6], [0, 1.2e6], color='k')
plt.xlabel("BTS2016 yield (Mg y-1)")
plt.ylabel("DayCent yield (Mg y-1)")
plt.title("Comparing county-level estimates from BTS2016 and DayCent")
# + [markdown] heading_collapsed=true
# # Climate analysis
# Here's some initial exploratory code to parse a DayCent-format weather file and analyze inter-annual variability in growing-season temperatures and precipitation.
# + hidden=true
weather_file1 = "NARR_89_234.wth"
weather_df1 = pd.read_csv(weather_file1, sep='\t', usecols=range(1, 7),
names=['DayOfMonth','Month', "Year", "DayOfYear", 'Tmax_C', 'Tmin_C', "Precip_cm"])
weather_file2 = "NARR_89_231.wth"
weather_df2 = pd.read_csv(weather_file2, sep='\t', usecols=range(1, 7),
names=['DayOfMonth','Month', "Year", "DayOfYear", 'Tmax_C', 'Tmin_C', "Precip_cm"])
weather_df2
# + hidden=true
wth_df = pd.merge(weather_df1, weather_df2, on=['Month', 'Year', 'DayOfYear'], suffixes=['_234', '_231'])
seasonal_wth_df = wth_df[wth_df['Month'].isin([5, 6, 7, 8, 9])]
seasonal_wth_df['Tavg_C_231'] = (seasonal_wth_df['Tmin_C_231'] + seasonal_wth_df['Tmax_C_231']) / 2.0
seasonal_wth_df['Tavg_C_234'] = (seasonal_wth_df['Tmin_C_234'] + seasonal_wth_df['Tmax_C_234']) / 2.0
annunal_wth_df = seasonal_wth_df.groupby('Year').agg({'Tmax_C_231': 'mean',
'Tavg_C_231': 'mean',
'Precip_cm_231': 'sum',
'Tmax_C_234': 'mean',
'Tavg_C_234': 'mean',
'Precip_cm_234': 'sum'})
annunal_wth_df['Precip_diff_cm'] = annunal_wth_df['Precip_cm_231'] - annunal_wth_df['Precip_cm_234']
annunal_wth_df.head()
# + hidden=true
fig = plt.figure()
spec = gridspec.GridSpec(ncols=1, nrows=2, height_ratios=[1, 2])
fig.suptitle("Difference between two weather grid centroids, 100km apart")
ax0 = fig.add_subplot(spec[0])
ax0.bar(annunal_wth_df.index, annunal_wth_df.Precip_diff_cm)
plt.setp(ax0.get_xticklabels(), visible=False)
plt.ylabel("Difference (cm)")
ax1 = fig.add_subplot(spec[1], sharex=ax0)
ax1.plot(annunal_wth_df.Precip_cm_231)
ax1.plot(annunal_wth_df.Precip_cm_234)
plt.xlabel("Year")
plt.ylabel("May–Sept. total precip (cm)")
# + hidden=true
plt.scatter(annunal_wth_df.Tavg_C_231, annunal_wth_df.Precip_cm_231)
plt.title("Inter-annual variability in growing season weather")
plt.xlabel("May–Sept. average air temperature (C)")
plt.ylabel("May–Sept. total precipitation (cm)")
``` |
{
"source": "johnlierz/eLink_Instrumentation",
"score": 3
} |
#### File: VNA/python/plotImpedance.py
```python
from itertools import islice
import sys, re, os
import numpy as np
import statistics
import skrf as rf
#print(rf.__version__)
#print(.__version__)
#rf.stylely()
import pylab
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import style
import pickle as pl
def split(word):
return [char for char in word]
def display_mean_impedance(ax, t1, t2, col):##https://www.tutorialfor.com/questions-285739.htm
lines = ax.get_lines()
# delete every array corresponding to a line drawn in ax except the last one. This is a
# brute-force way of resetting the line data to the current line's data
if len(lines)>1:
del lines[:-1]
# ensure that the length of lines is 1.
#print('size of lines:', len(lines))
# store the line arrays into a list. Every line drawn on the ax is considered data
Y = [line.get_ydata() for line in lines]
X = [line.get_xdata() for line in lines]
# create a table, and since the lists X and Y should have size=1, place the first
# element (array) in pandas table columns t and Z
df = pd.DataFrame()
df['t'] = X[0]
df['Z'] = Y[0]
# get the mean value of Z for a given time difference
Z_mean = df.query('t >=@t1 & t<=@t2').agg({'Z': 'mean'})
print('Mean impedance from ', t1, 'ns and ', t2, 'ns =', Z_mean.values, 'for', lines[0])
# plot the average line
x_coor = [t1, t2]
y_coor = [Z_mean, Z_mean]
ax.plot(x_coor, y_coor, color=col, linewidth=1, label='', linestyle='--')
def set_axes(ax, title, ymin, ymax, xmin, xmax, nolim):
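# note: the axis limits passed in are applied only when nolim evaluates equal to False
# (e.g. 0); when nolim is truthy the ranges are ignored and matplotlib's automatic
# limits are kept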
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.grid(True, color='0.8', which='minor')
ax.grid(True, color='0.4', which='major')
ax.set_title(title) #Time domain
if nolim==False:
ax.set_xlim((xmin, xmax))
ax.set_ylim((ymin, ymax))
plt.tight_layout()
#####################
cable_length = '100'
subfile = '0'; comp=''; S_ij=''
if subfile == '0': comp = '12'#12
elif subfile == '1': comp = '21'
if comp == '11' and subfile == '0': S_ij = '11'
elif comp == '12'and subfile == '0': S_ij = '21'
elif comp == '21' and subfile == '1': S_ij = '11'
i = int(split(S_ij)[0])
j = int(split(S_ij)[1])
#print('S_ij ----->', i, j)
out_dir = 'Plots'
sub_out_dir = 'Redo_VNA'
#####################
# *.s2p Files format
# Each record contains 1 stimulus value and 4 S-parameters (total of 9 values)
#Stim Real (S11) Imag(S11) Real(S21) Imag(S21) Real(S12) Imag(S12) Real(S22) Imag(S22)
# ==== our file format for vna_0: ====
#!freq RelS11 ImS11 RelS12 ImS12 RelS13 ImS13 RelS14 ImS14
# parameter in file => read from software
# S11 S13 00 01 S11 S12
# ----> ---->
# S12 S14 10 11 S21 S22
# ==== our file format for vna_1: ====
#!freq RelS21 ImS21 RelS22 ImS22 RelS23 ImS23 RelS24 ImS24
# parameter in file => read from software
# S21 S23 00 01 S11 S12
# ----> ---->
# S22 S24 10 11 S21 S22
#######################
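# In other words (a reading of the comments above): each .s2p sub-file stores one half of
# the 4-port measurement in 2-port form, so `subfile` and `comp` select which stored
# element (i, j) of the 2x2 network corresponds to the S-parameter of interest.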
if cable_length == '20':
net1 = rf.Network(out_dir+'/'+sub_out_dir+'/TP_20cm_12_ChD3_ChCMD.vna_'+subfile+'.s2p', f_unit='ghz')#33
net2 = rf.Network('Plots/TP_20cm_31_ChD1.vna_'+subfile+'.s2p', f_unit='ghz') #23
net3 = rf.Network('Plots/TP_20cm_49_ChD1.vna_'+subfile+'.s2p', f_unit='ghz')
elif cable_length == '35':
net4 = rf.Network('Plots/Redo_VNA/TP_35cm_57_ChD0.vna_'+subfile+'.s2p', f_unit='ghz')
net5 = rf.Network('Plots/Redo_VNA/TP_35cm_57_ChD1.vna_'+subfile+'.s2p', f_unit='ghz')
net6 = rf.Network('Plots/Redo_VNA/TP_35cm_57_ChCMD.vna_'+subfile+'.s2p', f_unit='ghz')
#net8 = rf.Network('Plots/TP_35cm_56_ChD1.vna_'+subfile+'.s2p', f_unit='ghz')
#net10 = rf.Network('Plots/Redo_VNA/TP_35cm_60_ChD1_Redo.vna_'+subfile+'.s2p', f_unit='ghz')
elif cable_length == '100':
#net1 = rf.Network('Plots/Redo_VNA/TP_1m_53_ChD0_rwy.vna_'+subfile+'.s2p', f_unit='ghz')
net1 = rf.Network('Plots/Redo_VNA/TP_1m_13_ChD3_ChCMD.vna_'+subfile+'.s2p', f_unit='ghz')
net2 = rf.Network('Plots/Redo_VNA/TP_1m_55_ChD1.vna_'+subfile+'.s2p', f_unit='ghz')
#net3 = rf.Network('Plots/Redo_VNA/TP_1m_55_ChCMD.vna_'+subfile+'.s2p', f_unit='ghz')
net3 = rf.Network('Plots/Redo_VNA/082620_TP_53_1m_ChD0_SK.vna_'+subfile+'.s2p', f_unit='ghz') #55 34G_CHD1
#net3 = rf.Network('Plots/TP_1m_32_ChD1.vna_'+subfile+'.s2p', f_unit='ghz')
elif cable_length == '140':
net1 = rf.network.Network('Plots/Redo_VNA/TP_1p4m_41_ChD0_redo_v2.vna_'+subfile+'.s2p', f_unit='ghz')
net2 = rf.network.Network('Plots/Redo_VNA/TP_1p4m_41_ChD1_redo_v2.vna_'+subfile+'.s2p', f_unit='ghz')
net3 = rf.network.Network('Plots/Redo_VNA/TP_1p4m_41_ChCMD_redo_v2.vna_'+subfile+'.s2p', f_unit='ghz')
elif cable_length == '200':
print('Plots/Redo_VNA/TP_2m_72_ChD0.vna_'+subfile+'.s2p')
net1 = rf.network.Network('Plots/Redo_VNA/TP_2m_72_ChD0.vna_'+subfile+'.s2p', f_unit='ghz')
net2 = rf.network.Network('Plots/Redo_VNA/TP_2m_72_ChD1.vna_'+subfile+'.s2p', f_unit='ghz')
net3 = rf.network.Network('Plots/Redo_VNA/TP_2m_72_ChCMD.vna_'+subfile+'.s2p', f_unit='ghz')
else:
filename = out_dir+'/'+sub_out_dir+'/FPC_0p6m.vna_'+subfile+'.s2p'
net1 = rf.network.Network(filename, f_unit='ghz')#straight_SMA.vna, FPC_0p6
netref = rf.network.Network(out_dir+'/'+sub_out_dir+'/straight_SMA.vna_'+subfile+'.s2p', f_unit='ghz')
with style.context('seaborn-ticks'):
fig0 = plt.figure(figsize=(10,4))
ax0=plt.subplot(1,2,1)
ax1=plt.subplot(1,2,2)
ax0.xaxis.set_minor_locator(AutoMinorLocator(2))
ax0.yaxis.set_minor_locator(AutoMinorLocator(2))
ax0.grid(True, color='0.8', which='minor')
ax0.grid(True, color='0.4', which='major')
if cable_length == '20':
## ---Frequency Domain Plots---:
net1_dc = net1[i,j].extrapolate_to_dc(kind='linear')
net2_dc = net2[i,j].extrapolate_to_dc(kind='linear')
net3_dc = net3[i,j].extrapolate_to_dc(kind='linear')
netref_dc = netref[i,j].extrapolate_to_dc(kind='linear')
net1_dc.plot_s_db(label='S'+comp+', TP_20cm_12 (32)', ax=ax0, color='b')
net2_dc.plot_s_db(label='S'+comp+', TP_20cm_31 (36)', ax=ax0, color='r')
net3_dc.plot_s_db(label='S'+comp+', TP_20cm_49 (34)', ax=ax0, color='c')
netref_dc.plot_s_db(label='S'+comp+', Calibration', ax=ax0, color='g')
set_axes(ax0, 'Frequency Domain', 100000, 6000000000, -50.0, 50.0, 1)
## ---Time Domain Plots---:
net1_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_20cm_12 (32)', ax=ax1, color='b')
display_mean_impedance(ax1, 1.0, 4.0, 'b')
net2_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_20cm_31 (36)', ax=ax1, color='r')
display_mean_impedance(ax1, 1.0, 4.0, 'r')
net3_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_20cm_49 (34)', ax=ax1, color='c')
display_mean_impedance(ax1, 1.0, 4.0, 'c')
netref_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', Calibration', ax=ax1, color='g')
display_mean_impedance(ax1, 0.0, 30.0, 'g')
set_axes(ax1, 'Time Domain', 0.0, 400.0, 0.0, 30.0, 0)
plt.show()
elif cable_length == '35':
net4_dc = net4[i,j].extrapolate_to_dc(kind='linear')
net5_dc = net5[i,j].extrapolate_to_dc(kind='linear')
net6_dc = net6[i,j].extrapolate_to_dc(kind='linear')
#net8_dc = net8[i,j].extrapolate_to_dc(kind='linear')
#net10_dc = net10[i,j].extrapolate_to_dc(kind='linear')
netref_dc = netref[i,j].extrapolate_to_dc(kind='linear')
net4_dc.plot_s_db(label='S'+comp+', TP_35cm_57_D0 (34)', ax=ax0)
net5_dc.plot_s_db(label='S'+comp+', TP_35cm_57_D1 (34)', ax=ax0)
net6_dc.plot_s_db(label='S'+comp+', TP_35cm_57_CMD (34)', ax=ax0)
#net8_dc.plot_s_db(label='S'+comp+', TP_35cm_56 (34)', ax=ax0)
#net10_dc.plot_s_db(label='S'+comp+', TP_35cm_60_redo (34)', ax=ax0)
netref_dc.plot_s_db(label='S'+comp+', Calibration', ax=ax0)
set_axes(ax0, 'Frequency Domain', 100000, 6000000000, -50.0, 50.0, 1)
net4_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_35cm_57_D0 (34)', ax=ax1)
net5_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_35cm_57_D1 (34)', ax=ax1)
net6_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_35cm_57_CMD (34)', ax=ax1)
#net8_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_35cm_56 (34)', ax=ax1)
#net10_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_35cm_60_redo (34)', ax=ax1)
netref_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', Calibration', ax=ax1)
set_axes(ax1, 'Time Domain', 0.0, 200.0, 0.0, 30.0, 0)
elif cable_length == '100':
net1_dc = net1[i,j].extrapolate_to_dc(kind='linear')
net2_dc = net2[i,j].extrapolate_to_dc(kind='linear')
net3_dc = net3[i,j].extrapolate_to_dc(kind='linear')
netref_dc = netref[i,j].extrapolate_to_dc(kind='linear')
net1_dc.plot_s_db(label='S'+comp+', TP_1m_13 (32)', ax=ax0, color='b')
net2_dc.plot_s_db(label='S'+comp+', TP_1m_55_D1 (34)', ax=ax0, color='r')
net3_dc.plot_s_db(label='S'+comp+', TP_1m_53_D0 (36)', ax=ax0, color='k')
netref_dc.plot_s_db(label='S'+comp+', Calibration', ax=ax0)
set_axes(ax0, 'Frequency Domain', 100000, 6000000000, -50.0, 50.0, 1)
net1_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_1m_13 (32)', ax=ax1, color='b')
display_mean_impedance(ax1, 4.0, 8.0, 'b')
net2_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_1m_55_D1 (34)', ax=ax1, color ='r')
display_mean_impedance(ax1, 4.0, 8.0, 'r')
net3_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_1m_53_D0 (36)', ax=ax1, color='k')
display_mean_impedance(ax1, 4.0, 8.0, 'k')
netref_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', Calibration', ax=ax1, color='g')
display_mean_impedance(ax1, 4.0, 8.0, 'g')
set_axes(ax1, 'Time Domain', 0.0, 200.0, 0.0, 30.0, 0)
elif cable_length == '140':
net1_dc = net1[i,j].extrapolate_to_dc(kind='linear')
net2_dc = net2[i,j].extrapolate_to_dc(kind='linear')
net3_dc = net3[i,j].extrapolate_to_dc(kind='linear')
netref_dc = netref[i,j].extrapolate_to_dc(kind='linear')
net1_dc.plot_s_db(label='S'+comp+', TP_1p4m_41_D0 (34)', ax=ax0)#s11
net2_dc.plot_s_db(label='S'+comp+', TP_1p4m_41_D1 (34)', ax=ax0)
net3_dc.plot_s_db(label='S'+comp+', TP_1p4m_41_CMD (34)', ax=ax0)
netref_dc.plot_s_db(label='S'+comp+', Calibration', ax=ax0) #s11
set_axes(ax0, 'Frequency Domain', 100000, 6000000000, -200.0, 100.0, 1)
net1_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_1p4m_34_D0 (34)', ax=ax1)
net2_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_1p4m_34_D1 (34)', ax=ax1)
net3_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_1p4m_34_CMD (34)', ax=ax1)
netref_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', Calibration', ax=ax1)
set_axes(ax1, 'Time Domain', 0.0, 300.0, 0.0, 35.0, 0)
elif cable_length == '200':
net1_dc = net1[i,j].extrapolate_to_dc(kind='linear')
net2_dc = net2[i,j].extrapolate_to_dc(kind='linear')
net3_dc = net3[i,j].extrapolate_to_dc(kind='linear')
netref_dc = netref[i,j].extrapolate_to_dc(kind='linear')
net1_dc.plot_s_db(label='S'+comp+', TP_2m_72_D0 (36)', ax=ax0)
net2_dc.plot_s_db(label='S'+comp+', TP_2m_72_D1 (36)', ax=ax0)
net3_dc.plot_s_db(label='S'+comp+', TP_2m_72_CMD (36)', ax=ax0)
netref_dc.plot_s_db(label='S'+comp+', Calibration', ax=ax0)
set_axes(ax0, 'Frequency Domain', 100000, 6000000000, -50.0, 50.0, 1)
net1_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_2m_72_D0 (36)', ax=ax1)
net2_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_2m_72_D1 (36)', ax=ax1)
net3_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', TP_2m_72_CMD (36)', ax=ax1)
netref_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', Calibration', ax=ax1)
set_axes(ax1, 'Time Domain', 0.0, 200.0, 0.0, 35.0, 0)
else:
# Freq Domain
net1_dc = net1[i,j].extrapolate_to_dc(kind='linear')
net1_dc.plot_s_db(label='S'+comp+', FPC_0p6', ax=ax0, color='b')
#netref_dc = netref[1,1].extrapolate_to_dc(kind='linear')
#netref_dc.plot_s_db(label='S'+comp+', Calibration', ax=ax0)
set_axes(ax0, 'Frequency Domain', 100000, 6000000000, -50.0, 50.0, 1)
# Time Domain
net1_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', FPC_0p6', ax=ax1, color='b')
display_mean_impedance(ax1, 0.0, 4.0, 'b')
#netref_dc.plot_z_time_step(pad=0, window='hamming', z0=50, label='TD'+comp+', Calibration', ax=ax1)
#display_mean_impedance(ax1, 0.0, 4.0, 'b')
set_axes(ax1, 'Time Domain', 0.0, 200.0, 0.0, 30.0, 0)
#fig0.savefig('Plots/36vs34_'+cable_length+'cm_freq_time_Z_rf_'+comp+redo+'.png')
#fig0.savefig('Plots/36G_'+cable_length+'cm_freq_time_Z_rf_'+comp+redo+'.png')
#fig0.savefig('Plots/SMA_'+cable_length+'freq_time_Z_rf_'+comp+redo+'zoomout.png')
#fig0.savefig('Plots/small_SMA_'+cable_length+'freq_time_Z_rf_'+comp+redo+'zoomout.png')
#fig0.savefig('Plots/FPC_0p6_'+cable_length+'freq_time_Z_rf_'+comp+redo+'.png')
fig0.savefig('Plots/TP_'+cable_length+'cm_freq_time_Z_rf_'+comp+'.png')
pylab.show()
```
#### File: VNA/python/readVNAData.py
```python
from itertools import islice
import re, decimal
import matplotlib.pyplot as plt
#import matplotlib.ticker as ticker
import numpy as np
import pickle as pl
def name(input):
match = re.match(r'TP_\w+_\d+', input)
name = match.group()
if '1p4' in name: name = name.replace('1p4', '1.4')
return name
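# Input impedance from the complex reflection coefficient S11, using the standard
# relation Z_in = Z0 * (1 + S11) / (1 - S11) with S11 = S11_r + j*S11_i, expanded
# into its real and imaginary parts below.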
def Z_in(Z0, S11_r, S11_i):
Z_in_R = Z0*( (1 - pow(S11_r,2) - pow(S11_i,2) )/( pow((1-S11_r),2) + pow(S11_i,2)) )
Z_in_I = Z0*( (2*S11_i)/(pow((1 - S11_r),2) + pow(S11_i,2)) )
return Z_in_R, Z_in_I
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--input', metavar='T', type='string', action='store',
default='TP_1m_33_ChD1.vna.txt',
dest='input',
help='input text file')
parser.add_option('--directory', metavar='T', type='string', action='store',
default='Plots',
dest='directory',
help='directory to store plots')
(options,args) = parser.parse_args()
# ==========end: options =============
input = options.input
dir_in= options.directory
cable = name(input)
date = ''
data_groups = []
group_1 = [ ['index', 'freq', 'S11_Real', 'S11_Img', 'S12_Real', 'S12_Img', 'S13_Real', 'S13_Img', 'S14_Real', 'S14_Img']]
group_2 = [ ['index', 'freq', 'S21_Real', 'S21_Img', 'S22_Real', 'S22_Img', 'S23_Real', 'S23_Img', 'S24_Real', 'S24_Img']]
# split the file into data chunks corresponding to S parameter matrix by date
# ---------------------------------------------------------------------------
with open(input) as f:
for i, line in enumerate(f):
z = re.match(r'MODEL:\s+DATE:\s+\d+/\d+/\d+', line)
if z:
date = z.group().split()[2]
data_groups.append(i)
print('line boundaries for data group: ', data_groups)
f.close()
print('Data was taken on: ', date)
# reopen the text file, and jump to the lines of interest corresponding to the different data groups.
# ------------------------------------------------------------------------------------------
with open(input) as f:
found_S11=False; start_reading=False;
for i, l in enumerate(f):
# group 1:
if i >= int(data_groups[0]) and i < data_groups[1]:
# make sure that the group includes S11 parameter
check = re.findall(r'PARAMETER:\s+(.\w+.)\s+(.\w+.)\s+(.\w+.)\s+(.\w+.)\s+', l)
if len(check)!= 0:
if 'S11' in check[0][0]: found_S11=True
else:
print('1st parameter: ',check[0][0], 'better adapt the script to read S11')
break
# find a starting line to read the tables
start = re.match(r'FREQUENCY\s+POINTS:\s+',l)
if start: start_reading = True
if found_S11 and start_reading:
rows = l.strip().split('\n')[0].split('\t')
# start writting from 1st row of the table index
if not rows[0].isdigit(): continue
group_1.append(l.strip().split('\t'))
# draw the plots:
# ---------------
freq, z_in_real=[],[]
freq, z_in_img =[],[]
for i, c in enumerate(group_1):
if i==0: continue; # skip the title row
freq.append(float(c[1]))
#time.append(1./float(c[1]))
S11_r_in = float(c[2])
S11_i_in = float(c[3])
S12_r_in = float(c[4])
S12_i_in = float(c[5])
#print('S12_r', S12_r_in)
Z_in_real, Z_in_img = Z_in(50.0, S11_r_in, S11_i_in )
#Z_in_real = Z_in(50.0, S11_r_in, S11_i_in )
z_in_real.append(Z_in_real)
z_in_img.append(Z_in_img)
#print('freq: ', freq)
#print('Z_real: ', z_in_real)
#print('time: ', time)
fig = plt.figure(figsize=(8,5))
ax0 = fig.add_subplot(1,2,1)
major_ticks = np.arange(0, 6.5, 0.5)
minor_ticks = np.arange(0, 6.5, 0.1)
ax0.set_xticks(major_ticks)
ax0.set_xticks(minor_ticks, minor=True)
ax0.grid(which='minor', alpha=0.2)
ax0.grid(which='major', alpha=0.5)
ax0.set_xlabel('Frequency (GHz)')
ax0.set_ylabel('Z (Real) (\u03A9)')
plt.plot(freq, z_in_real, 'r', linewidth=2.0, label=cable)
ax0.legend()
fig1 = plt.gcf()
ax1 = fig.add_subplot(1,2,2)
ax1.set_xticks(major_ticks)
ax1.set_xticks(minor_ticks, minor=True)
ax1.set_xlabel('Frequency (GHz)')
ax1.set_ylabel('Z (Img) (\u03A9)')
ax1.grid(which='minor', alpha=0.2)
ax1.grid(which='major', alpha=0.5)
plt.plot(freq, z_in_img, 'b', linewidth=2.0, label=cable)
plt.show()
plt.draw()
fig1.savefig(dir_in+'/'+cable+'.png')
pl.dump(fig1, open(dir_in+'/'+cable+'.pickle', 'wb'))
``` |
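As a quick sanity check of the `Z_in` conversion used above (the function is copied verbatim below so the snippet runs on its own): a matched load, S11 = 0, maps to 50 + j0 Ω, while a negative real reflection maps to an impedance below Z0.
```python
# Sanity check of the S11 -> input impedance conversion from readVNAData.py.
def Z_in(Z0, S11_r, S11_i):
    Z_in_R = Z0*( (1 - pow(S11_r,2) - pow(S11_i,2) )/( pow((1-S11_r),2) + pow(S11_i,2)) )
    Z_in_I = Z0*( (2*S11_i)/(pow((1 - S11_r),2) + pow(S11_i,2)) )
    return Z_in_R, Z_in_I

print(Z_in(50.0, 0.0, 0.0))    # matched load -> (50.0, 0.0)
print(Z_in(50.0, -0.5, 0.0))   # S11 = -0.5 -> (~16.7, 0.0), i.e. below 50 ohm
```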
{
"source": "JohnLieske/ansible-navigator",
"score": 3
} |
#### File: ansible_navigator/command_runner/command_runner.py
```python
import multiprocessing
import subprocess
from dataclasses import dataclass
from dataclasses import field
from queue import Queue
from typing import Callable
from typing import List
from typing import Optional
from ..utils.definitions import LogMessage
PROCESSES = (multiprocessing.cpu_count() - 1) or 1
@dataclass(frozen=False)
class Command:
"""Data structure for details of a command to be run.
A ``Command`` is updated after instantiation with details from either
``stdout`` or ``stderr``.
"""
# pylint: disable=too-many-instance-attributes
identity: str
command: str
post_process: Callable
return_code: int = 0
stdout: str = ""
stderr: str = ""
details: List = field(default_factory=list)
errors: str = ""
messages: List[LogMessage] = field(default_factory=list)
@property
def stderr_lines(self):
"""Produce a list of stderr lines.
:returns: A list of stderr lines
"""
return self.stderr.splitlines()
@property
def stdout_lines(self):
"""Produce a list of stdout lines.
:returns: A list of stdout lines
"""
return self.stdout.splitlines()
def run_command(command: Command) -> None:
"""Run a command.
:param command: Command to be run
"""
try:
proc_out = subprocess.run(
command.command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True,
universal_newlines=True,
shell=True,
)
command.return_code = proc_out.returncode
command.stdout = proc_out.stdout
except subprocess.CalledProcessError as exc:
command.return_code = exc.returncode
command.stdout = str(exc.stdout)
command.stderr = str(exc.stderr)
def worker(pending_queue: multiprocessing.Queue, completed_queue: multiprocessing.Queue) -> None:
"""Read pending, run, post process, and place in completed.
:param pending_queue: All pending commands
:param completed_queue: All completed commands
"""
while True:
command = pending_queue.get()
if command is None:
break
run_command(command)
command.post_process(command)
completed_queue.put(command)
class CommandRunner:
"""Functionality for running commands."""
def __init__(self):
"""Initialize the command runner."""
self._completed_queue: Optional[Queue] = None
self._pending_queue: Optional[Queue] = None
@staticmethod
def run_single_proccess(commands: List[Command]):
"""Run commands with a single process.
:param commands: All commands to be run
:returns: The results from running all commands
"""
results: List[Command] = []
for command in commands:
run_command(command)
command.post_process(command)
results.append(command)
return results
def run_multi_proccess(self, commands: List[Command]) -> List[Command]:
"""Run commands with multiple processes.
Workers are started to read from pending queue.
Exit when the number of results is equal to the number
of commands needing to be run.
:param commands: All commands to be run
:returns: The results from running all commands
"""
if self._completed_queue is None:
self._completed_queue = multiprocessing.Manager().Queue()
if self._pending_queue is None:
self._pending_queue = multiprocessing.Manager().Queue()
self.start_workers(commands)
results: List[Command] = []
while len(results) != len(commands):
results.append(self._completed_queue.get())
return results
def start_workers(self, jobs):
"""Start the workers.
:param jobs: List of commands to be run
"""
worker_count = min(len(jobs), PROCESSES)
processes = []
for _proc in range(worker_count):
proc = multiprocessing.Process(
target=worker,
args=(self._pending_queue, self._completed_queue),
)
processes.append(proc)
proc.start()
for job in jobs:
self._pending_queue.put(job)
for _proc in range(worker_count):
self._pending_queue.put(None)
for proc in processes:
proc.join()
```
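A brief, hypothetical usage sketch for the classes above: a post-processor mutates each `Command` after it runs, and the runner returns the same objects with `return_code`, `stdout`, and any details filled in. The shell commands here are placeholders.
```python
# Hypothetical usage of Command / CommandRunner as defined above.
def keep_first_line(command: Command) -> None:
    """Record the first line of stdout, if any, as a detail."""
    if command.stdout_lines:
        command.details.append(command.stdout_lines[0])

commands = [
    Command(identity="hostname", command="hostname", post_process=keep_first_line),
    Command(identity="date", command="date", post_process=keep_first_line),
]

runner = CommandRunner()
results = runner.run_single_proccess(commands)   # or runner.run_multi_proccess(commands)
for result in results:
    print(result.identity, result.return_code, result.details)
```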
#### File: ansible_navigator/configuration_subsystem/configurator.py
```python
import logging
import os
from copy import deepcopy
from typing import List
from typing import Tuple
from typing import Union
from ..utils.definitions import ExitMessage
from ..utils.definitions import ExitPrefix
from ..utils.definitions import LogMessage
from ..utils.functions import oxfordcomma
from ..utils.functions import shlex_join
from ..utils.json_schema import validate
from ..utils.serialize import SafeLoader
from ..utils.serialize import yaml
from ..utils.version_migration.migrate import MigrationType
from ..utils.version_migration.migrate import run_all_migrations
from .definitions import ApplicationConfiguration
from .definitions import Constants as C
from .definitions import SettingsEntry
from .parser import Parser
from .transform import to_schema
from .utils import parse_ansible_cfg
class Configurator:
"""The configuration class."""
def __init__(
self,
params: List[str],
application_configuration: ApplicationConfiguration,
apply_previous_cli_entries: Union[List, C] = C.NONE,
skip_roll_back: bool = False,
):
"""Initialize the configuration variables.
:param params: A list of parameters e.g. ['-x', 'value']
:param application_configuration: An application specific Config object
:param apply_previous_cli_entries: Apply previous USER_CLI values where the current value
is not a USER_CLI sourced value, a list of entry names
['all'] will apply all previous
:param skip_roll_back: Skip roll back on error
"""
self._apply_previous_cli_entries = apply_previous_cli_entries
self._config = application_configuration
self._exit_messages: List[ExitMessage] = []
self._messages: List[LogMessage] = []
self._params = params
self._sanity_check()
self._skip_rollback = skip_roll_back
self._unaltered_entries = deepcopy(self._config.entries)
def _sanity_check(self) -> None:
if self._apply_previous_cli_entries is not C.NONE:
if self._config.internals.initializing:
raise ValueError("'apply_previous_cli' cannot be used while initializing")
if not self._config.initial:
raise ValueError("'apply_previous_cli' enabled prior to an initialization")
def _roll_back(self) -> None:
"""In the case of a rollback, log the configuration state prior to roll back."""
if self._skip_rollback:
return
message = "Configuration errors encountered, rolling back to previous configuration."
self._messages.append(LogMessage(level=logging.WARNING, message=message))
for entry in self._config.entries:
message = f"Prior to rollback: {entry.name} = '{entry.value.current}'"
message += f" ({type(entry.value.current).__name__}/{entry.value.source.value})"
self._messages.append(LogMessage(level=logging.DEBUG, message=message))
self._config.entries = self._unaltered_entries
for entry in self._config.entries:
message = f"After rollback: {entry.name} = '{entry.value.current}'"
message += f" ({type(entry.value.current).__name__}/{entry.value.source.value})"
self._messages.append(LogMessage(level=logging.DEBUG, message=message))
message = "Configuration rollback complete."
self._messages.append(LogMessage(level=logging.DEBUG, message=message))
def configure(self) -> Tuple[List[LogMessage], List[ExitMessage]]:
"""Perform the configuration.
Save the original entries, if an error is encountered
restore them.
:returns: Log messages
"""
self._config.original_command = self._params
shlex_joined = shlex_join(self._config.original_command)
cmd_message = f"Command provided: '{shlex_joined}'"
self._messages.append(LogMessage(level=logging.DEBUG, message=cmd_message))
warn_message = "Issues were found while applying the settings."
warning = ExitMessage(message=warn_message, prefix=ExitPrefix.WARNING)
command = ExitMessage(message=cmd_message, prefix=ExitPrefix.HINT)
self._restore_original()
self._apply_defaults()
self._apply_settings_file()
self._apply_environment_variables()
self._apply_cli_params()
if self._exit_messages:
self._exit_messages[0:0] = [warning, command]
self._roll_back()
return self._messages, self._exit_messages
self._apply_previous_cli_to_current()
self._retrieve_ansible_cfg()
self._post_process()
self._check_choices()
if self._exit_messages:
self._exit_messages[0:0] = [warning, command]
self._roll_back()
return self._messages, self._exit_messages
if self._config.internals.initializing:
self._config.initial = deepcopy(self._config)
# Our work is done, set the initialization flag to false
self._config.internals.initializing = False
return self._messages, self._exit_messages
def _argparse_error_handler(self, message: str):
"""Call back for argparser error handling.
:param message: A message from the parser
:type message: str
"""
self._exit_messages.append(ExitMessage(message=message))
def _restore_original(self) -> None:
"""Restore the current values back to NOT_SET."""
for entry in self._config.entries:
if self._config.internals.initializing or entry.change_after_initial:
entry.value.current = C.NOT_SET
entry.value.source = C.NOT_SET
else:
message = f"'{entry.name}' cannot be reconfigured. (restore original)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
def _apply_defaults(self) -> None:
for entry in self._config.entries:
if self._config.internals.initializing or entry.change_after_initial:
if entry.value.default is not C.NOT_SET:
entry.value.current = entry.value.default
entry.value.source = C.DEFAULT_CFG
else:
message = f"'{entry.name}' cannot be reconfigured. (apply defaults)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
def _apply_settings_file(self) -> None:
# pylint: disable=too-many-locals
settings_filesystem_path = self._config.internals.settings_file_path
if not isinstance(settings_filesystem_path, str):
return
run_all_migrations(
settings_file_str=settings_filesystem_path,
migration_types=(MigrationType.SETTINGS_FILE,),
)
with open(settings_filesystem_path, "r", encoding="utf-8") as fh:
try:
config = yaml.load(fh, Loader=SafeLoader)
if config is None:
raise ValueError("Settings file cannot be empty.")
except (yaml.scanner.ScannerError, yaml.parser.ParserError, ValueError) as exc:
exit_msg = f"Settings file found {settings_filesystem_path}, but failed to load it."
self._exit_messages.append(ExitMessage(message=exit_msg))
exit_msg = f" error was: '{' '.join(str(exc).splitlines())}'"
self._exit_messages.append(ExitMessage(message=exit_msg))
exit_msg = (
f"Try checking the settings file '{settings_filesystem_path}'"
"and ensure it is properly formatted"
)
self._exit_messages.append(
ExitMessage(message=exit_msg, prefix=ExitPrefix.HINT),
)
return
schema = to_schema(settings=self._config)
errors = validate(schema=schema, data=config)
if errors:
msg = (
"The following errors were found in the settings file"
f" ({settings_filesystem_path}):"
)
self._exit_messages.append(ExitMessage(message=msg))
self._exit_messages.extend(error.to_exit_message() for error in errors)
hint = "Check the settings file and compare it to the current version."
self._exit_messages.append(ExitMessage(message=hint, prefix=ExitPrefix.HINT))
hint = (
"The current version can be found here:"
" (https://ansible-navigator.readthedocs.io/en/latest/settings/"
"#ansible-navigator-settings)"
)
self._exit_messages.append(ExitMessage(message=hint, prefix=ExitPrefix.HINT))
hint = (
"The schema used for validation can be seen with"
" 'ansible-navigator settings --schema'"
)
self._exit_messages.append(ExitMessage(message=hint, prefix=ExitPrefix.HINT))
hint = (
"A sample settings file can be created with"
" 'ansible-navigator settings --sample'"
)
self._exit_messages.append(ExitMessage(message=hint, prefix=ExitPrefix.HINT))
return
for entry in self._config.entries:
settings_file_path = entry.settings_file_path(self._config.application_name)
path_parts = settings_file_path.split(".")
data = config
try:
for key in path_parts:
data = data[key]
if self._config.internals.initializing or entry.change_after_initial:
entry.value.current = data
entry.value.source = C.USER_CFG
else:
message = f"'{entry.name}' cannot be reconfigured. (settings file)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
except TypeError as exc:
exit_msg = (
"Errors encountered when loading settings file:"
f" {settings_filesystem_path}"
f" while loading entry {entry.name}, attempted: {settings_file_path}."
f"The resulting error was {str(exc)}"
)
self._exit_messages.append(ExitMessage(message=exit_msg))
exit_msg = (
f"Try checking the settings file '{settings_filesystem_path}'"
"and ensure it is properly formatted"
)
self._exit_messages.append(
ExitMessage(message=exit_msg, prefix=ExitPrefix.HINT),
)
return
except KeyError:
message = f"{settings_file_path} not found in settings file"
self._messages.append(LogMessage(level=logging.DEBUG, message=message))
def _apply_environment_variables(self) -> None:
for entry in self._config.entries:
set_env_var = os.environ.get(entry.environment_variable(self._config.application_name))
if set_env_var is not None:
if self._config.internals.initializing or entry.change_after_initial:
if entry.cli_parameters is not None and entry.cli_parameters.nargs in [
"+",
"*",
]:
entry.value.current = [
value.strip()
for value in set_env_var.split(entry.environment_variable_split_char)
]
else:
entry.value.current = set_env_var
entry.value.source = C.ENVIRONMENT_VARIABLE
else:
message = f"'{entry.name}' cannot be reconfigured. (environment variables)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
def _apply_cli_params(self) -> None:
parser = Parser(self._config).parser
setattr(parser, "error", self._argparse_error_handler)
parser_response = parser.parse_known_args(self._params)
if parser_response is None:
return
args, cmdline = parser_response
if cmdline:
# In the case a subcommand is not a positional, remove the --
additional_args = [arg for arg in cmdline if arg != "--"]
self._config.entry("cmdline").value.current = additional_args
self._config.entry("cmdline").value.source = C.USER_CLI
for param, value in vars(args).items():
if self._config.entry(param).subcommand_value is True and value is None:
continue
entry = self._config.entry(param)
if self._config.internals.initializing or entry.change_after_initial:
entry.value.current = value
entry.value.source = C.USER_CLI
else:
message = f"'{entry.name}' cannot be reconfigured. (cli params)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
def _post_process(self) -> None:
delayed = []
normal = []
# Separate normal and delayed entries so they can be processed in that order.
for entry in self._config.entries:
if entry.delay_post_process:
delayed.append(entry)
else:
normal.append(entry)
for entry in normal + delayed:
if self._config.internals.initializing or entry.change_after_initial:
processor = getattr(self._config.post_processor, entry.name, None)
if callable(processor):
messages, errors = processor(entry=entry, config=self._config)
self._messages.extend(messages)
self._exit_messages.extend(errors)
else:
message = f"'{entry.name}' cannot be reconfigured. (post process)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
def _check_choices(self) -> None:
for entry in self._config.entries:
if entry.cli_parameters and entry.choices:
if isinstance(entry.value.current, list):
for value in entry.value.current:
logged = self._check_choice(entry=entry, value=value)
if logged:
break
else:
self._check_choice(entry=entry, value=entry.value.current)
def _check_choice(self, entry: SettingsEntry, value: Union[bool, str]):
if entry.cli_parameters and entry.choices:
if value not in entry.choices:
self._exit_messages.append(ExitMessage(message=entry.invalid_choice))
choices = [
f"{entry.cli_parameters.short} {str(choice).lower()}"
for choice in entry.choices
]
exit_msg = f"Try again with {oxfordcomma(choices, 'or')}"
self._exit_messages.append(
ExitMessage(message=exit_msg, prefix=ExitPrefix.HINT),
)
return True
return False
def _apply_previous_cli_to_current(self) -> None:
"""Apply eligible previous CLI values to current not set by the CLI."""
# _apply_previous_cli_entries must be ALL or a list of entries
if self._apply_previous_cli_entries is not C.ALL and not isinstance(
self._apply_previous_cli_entries,
list,
):
return
current_subcommand = [
entry.value.current for entry in self._config.entries if entry.subcommand_value is True
][0]
previous_subcommand = [
entry.value.current
for entry in self._config.initial.entries
if entry.subcommand_value is True
][0]
for current_entry in self._config.entries:
# retrieve the corresponding previous entry
previous_entry = self._config.initial.entry(current_entry.name)
# skip if not initial and not able to be changed
if not any((self._config.internals.initializing, current_entry.change_after_initial)):
message = f"'{current_entry.name}' cannot be reconfigured (apply previous cli)"
self._messages.append(LogMessage(level=logging.INFO, message=message))
continue
# skip if currently set from the CLI
if current_entry.value.source is C.USER_CLI:
continue
# skip if _apply_previous_cli_entries is a list and the entry isn't in it
if (
isinstance(self._apply_previous_cli_entries, list)
and current_entry.name not in self._apply_previous_cli_entries
):
continue
# skip if the previous entry is not eligible for reapplication
if previous_entry.apply_to_subsequent_cli not in [C.ALL, C.SAME_SUBCOMMAND]:
continue
# skip if the same subcommand is required for reapplication
if current_entry.apply_to_subsequent_cli is C.SAME_SUBCOMMAND:
if current_subcommand != previous_subcommand:
continue
# skip if the previous entry was not set by the CLI
if previous_entry.value.source is not C.USER_CLI:
continue
current_entry.value.current = previous_entry.value.current
current_entry.value.source = C.PREVIOUS_CLI
def _retrieve_ansible_cfg(self):
"""Retrieve the ansible.cfg file.
EE support is needed early on here so the post processors
can have access to the ansible.cfg file contents as a fallback to
navigator's settings sources. The value won't be set, but it is needed to
determine where the ansible.cfg file should be pulled from.
"""
ee_enabled = str(self._config.execution_environment).lower() == "true"
parsed_ansible_cfg = parse_ansible_cfg(ee_enabled=ee_enabled)
self._messages.extend(parsed_ansible_cfg.messages)
self._exit_messages.extend(parsed_ansible_cfg.exit_messages)
self._config.internals.ansible_configuration = parsed_ansible_cfg.config
```
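`configure()` above applies values in a fixed order: defaults, then settings file, then environment variables, then CLI parameters, with eligible previous CLI values re-applied afterwards. One detail worth illustrating is the list handling in `_apply_environment_variables`: multi-valued entries are split on a per-entry character and whitespace-stripped. A standalone sketch of just that behaviour follows; the variable name and the comma split character are assumptions, not the real entry definitions.
```python
# Standalone sketch of the splitting done in _apply_environment_variables.
# 'ANSIBLE_NAVIGATOR_EXAMPLE' and the ',' split character are assumptions.
import os

os.environ["ANSIBLE_NAVIGATOR_EXAMPLE"] = "HOME, USER ,PATH"
raw = os.environ.get("ANSIBLE_NAVIGATOR_EXAMPLE")
split_char = ","  # stand-in for entry.environment_variable_split_char
values = [value.strip() for value in raw.split(split_char)]
print(values)  # ['HOME', 'USER', 'PATH']
```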
#### File: ansible_navigator/ui_framework/field_button.py
```python
from dataclasses import dataclass
from typing import Callable
from typing import Optional
from .curses_window import Window
from .form_defs import FieldValidationStates
from .form_handler_button import FormHandlerButton
from .validators import FieldValidators
@dataclass
class FieldButton:
"""A text input field."""
name: str
text: str
disabled: bool = True
pressed: bool = False
color: int = 0
window_handler = FormHandlerButton
validator: Callable = FieldValidators.none
win: Optional[Window] = None
@property
def full_prompt(self) -> str:
"""No default to add into the prompt for checkbox.
:returns: Empty string
"""
return ""
def validate(self, response: FieldValidationStates) -> None:
"""Validate this instance.
:param response: List of field states for validation.
"""
validation = self.validator(response)
if validation.error_msg:
self.disabled = True
else:
self.disabled = False
def conditional_validation(self, response: FieldValidationStates) -> None:
"""Conditional validation used for form validation.
:param response: List of field states for validation.
"""
self.validate(response)
```
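A tiny, hypothetical construction of the `FieldButton` dataclass above; buttons start disabled and contribute nothing to the form prompt.
```python
# Hypothetical FieldButton usage; the name and text are illustrative.
submit = FieldButton(name="submit", text="Submit")
print(submit.disabled)     # True until validation enables it
print(submit.full_prompt)  # '' (buttons add no default text to the prompt)
```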
#### File: ansible_navigator/ui_framework/field_radio.py
```python
from dataclasses import dataclass
from dataclasses import field
from functools import partial
from typing import Callable
from typing import List
from typing import Union
from .form_handler_options import FormHandlerOptions
from .sentinels import Unknown
from .sentinels import unknown
from .validators import FieldValidators
from .validators import Validation
@dataclass
class FieldRadio:
"""A form field containing radios."""
prompt: str
name: str
current_error: str = ""
valid: Union[Unknown, bool] = unknown
options: List = field(default_factory=list)
window_handler = FormHandlerOptions
@property
def checked(self):
"""Conveniently return just checked options.
:returns: Checked options
"""
return tuple(option.name for option in self.options if option.checked)
@property
def formatted_default(self) -> str:
"""Format the field prompt with an empty string.
:returns: Empty string
"""
return ""
@property
def full_prompt(self) -> str:
"""Format the prompt.
:returns: Prompt
"""
return self.prompt
@property
def validator(self) -> Callable:
"""Provide a validator based on form type.
:returns: Validation of checked entries
"""
return partial(FieldValidators.some_of_or_none, max_selected=1, min_selected=1)
def _validate(self, response: "FieldRadio") -> Validation:
validation = self.validator(choices=response.options)
if validation.error_msg:
self.valid = False
else:
self.valid = True
return validation
def validate(self, response: "FieldRadio") -> None:
"""Validate this FieldRadio instance.
:param response: Instance to check and verify options are valid
"""
validation = self._validate(response)
self.current_error = validation.error_msg
def conditional_validation(self, response: "FieldRadio") -> None:
"""Conditional validation for a FieldRadio instance.
:param response: Instance to check and verify options are valid
"""
self._validate(response)
self.current_error = ""
```
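A small sketch of `FieldRadio` with stand-in option objects; the real option type is not defined in this file, so only the `name` and `checked` attributes used by the `checked` property are assumed here.
```python
from dataclasses import dataclass

# Stand-in for the option type consumed by FieldRadio (assumed attributes only).
@dataclass
class _Option:
    name: str
    checked: bool = False

radio = FieldRadio(
    prompt="Select a color",
    name="color",
    options=[_Option("red", checked=True), _Option("blue")],
)
print(radio.checked)      # ('red',)
print(radio.full_prompt)  # 'Select a color'
```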
#### File: ansible_navigator/ui_framework/field_working.py
```python
from dataclasses import dataclass
from typing import Callable
from typing import List
from typing import Optional
from typing import Union
from .curses_window import Window
from .form_handler_working import FormHandlerWorking
from .sentinels import Unknown
from .sentinels import unknown
from .validators import FieldValidators
@dataclass
class FieldWorking:
"""A text input field."""
name: str
messages: List[str]
current_error: str = ""
window_handler = FormHandlerWorking
valid: Union[bool, Unknown] = unknown
validator: Callable = FieldValidators.null
win: Optional[Window] = None
@property
def full_prompt(self) -> str:
"""Return the max width information.
This is needed because the window width and ':' placement
are based on the largest 'prompt'.
:returns: Max width information message
"""
return max(self.messages)
def validate(self, response: str) -> None:
# pylint: disable=unused-argument
"""No validation required for working field.
:param response: Field response
"""
self.valid = True
def conditional_validation(self, response: str) -> None:
"""No conditional validation.
:param response: Field response
"""
self.validate(response)
```
#### File: ansible_navigator/ui_framework/form_handler_information.py
```python
from curses import ascii as curses_ascii
from typing import TYPE_CHECKING
from typing import List
from typing import Tuple
from .curses_window import CursesWindow
if TYPE_CHECKING:
from .field_information import FieldInformation # pylint: disable=cyclic-import
class FormHandlerInformation(CursesWindow):
"""Handle form button."""
def __init__(self, screen, ui_config):
"""Initialize the handler for a informational notification.
:param screen: A curses window
:param ui_config: The current user interface configuration
"""
super().__init__(ui_config=ui_config)
self._screen = screen
@staticmethod
def handle(idx, form_fields: List) -> Tuple["FieldInformation", int]:
"""Handle the information field, immediate return.
:param idx: Index to retrieve specific field
:param form_fields: List of fields
:returns: Indexed form fields
"""
return form_fields[idx], curses_ascii.NL
```
#### File: ansible_navigator/utils/ansi.py
```python
import os
import sys
from sys import stdout
from .definitions import Color
IS_TTY = stdout.isatty()
COLOR = "NO_COLOR" not in os.environ and IS_TTY
def changed(color: bool, message: str):
"""Output changed information to the console.
:param color: Whether to color the message
:param message: The message to output
"""
if color:
print(f"\r{Color.YELLOW}{message}{Color.END}\033[K")
else:
print(message)
def failed(color: bool, message: str):
"""Output failure information to the console.
:param color: Whether to color the message
:param message: The message to output
"""
if color:
print(f"\r{Color.RED}{message}{Color.END}\033[K")
else:
print(message)
def info(color: bool, message: str):
"""Output info information to the console.
:param color: Whether to color the message
:param message: The message to output
"""
if color:
print(f"{Color.CYAN}{message}{Color.END}")
else:
print(message)
def subtle(color: bool, message: str):
"""Output subtle information to the console.
:param color: Whether to color the message
:param message: The message to output
"""
if color:
print(f"{Color.GREY}{message}{Color.END}")
else:
print(message)
def prompt_enter():
"""Output prompt information to the console."""
try:
input("Press Enter to continue: ")
except KeyboardInterrupt:
sys.exit(0)
def prompt_yn(message: str) -> bool:
"""Output prompt information to the console.
:param message: The message to output
:return: Whether the user answered yes
"""
try:
reply = None
while reply not in ("", "y", "n"):
reply = input(f"{message} (Y/n): ").lower()
return reply in ("", "y")
except KeyboardInterrupt:
sys.exit(0)
def success(color: bool, message: str):
"""Output success information to the console.
:param color: Whether to color the message
:param message: The message to output
"""
if color:
print(f"\r{Color.GREEN}{message}{Color.END}\033[K")
else:
print(message)
def warning(color: bool, message: str):
"""Output warning information to the console.
:param color: Whether to color the message
:param message: The message to output
"""
if color:
print(f"{Color.YELLOW}{message}{Color.END}")
else:
print(message)
def working(color: bool, message: str):
"""Output working information to the console.
:param color: Whether to color the message
:param message: The message to output
"""
if color:
print(f"{Color.GREY}{message}{Color.END}", end="", flush=True)
else:
print(message)
def blank_line():
"""Output a blank line to the console."""
print()
```
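A quick illustration of the console helpers above; `COLOR` is computed at import time, so the same calls print plain text when output is redirected or `NO_COLOR` is set. The messages are placeholders.
```python
# Illustrative calls to the helpers defined above.
info(color=COLOR, message="Starting the example task")
working(color=COLOR, message="working...")
blank_line()
changed(color=COLOR, message="Configuration updated")
success(color=COLOR, message="Done")
```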
#### File: ansible_navigator/utils/definitions.py
```python
import logging
import textwrap
from dataclasses import dataclass
from dataclasses import field
from enum import Enum
from typing import List
from typing import NamedTuple
GOLDEN_RATIO = 1.61803398875
class Color:
"""Color constants."""
BLACK = "\033[30m"
RED = "\033[31m"
GREEN = "\033[32m"
YELLOW = "\033[33m"
BLUE = "\033[34m"
MAGENTA = "\033[35m"
CYAN = "\033[36m"
WHITE = "\033[37m"
GREY = "\033[90m" # Bright black?
BRIGHT_RED = "\033[91m"
BRIGHT_GREEN = "\033[92m"
BRIGHT_YELLOW = "\033[93m"
BRIGHT_BLUE = "\033[94m"
BRIGHT_MAGENTA = "\033[95m"
BRIGHT_CYAN = "\033[96m"
BRIGHT_WHITE = "\033[97m"
END = "\033[0m"
class Decoration:
"""Decoration constants."""
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
REVERSED = "\033[7m"
END = "\033[0m"
class ExitPrefix(Enum):
"""An exit message prefix."""
ERROR = "Error"
HINT = "Hint"
NOTE = "Note"
WARNING = "Warning"
@classmethod
def _longest_name(cls):
"""Return the longest exit message prefix.
:returns: The longest exit message prefix
"""
return max(len(member.value) for member in cls)
@classmethod
def longest_formatted(cls):
"""Return the longest exit message prefix.
:returns: The longest exit message prefix
"""
return max(len(str(member)) for member in cls)
def __str__(self):
"""Return the exit message prefix as a string.
:returns: The exit message prefix as a string
"""
return f"{' ' * (self._longest_name() - len(self.name))}{self.name.capitalize()}: "
@dataclass
class ExitMessage:
"""An object to hold a message to present when exiting."""
#: The message that will be presented
message: str
#: The prefix for the message, used for formatting
prefix: ExitPrefix = ExitPrefix.ERROR
@property
def color(self):
"""Return a color for the prefix.
:returns: The color for the prefix
"""
color_mapping = {
ExitPrefix.ERROR: Color.RED,
ExitPrefix.HINT: Color.CYAN,
ExitPrefix.NOTE: Color.GREEN,
ExitPrefix.WARNING: Color.YELLOW,
}
return color_mapping[self.prefix]
@property
def level(self):
"""Return a log level.
:returns: The log level
"""
mapping = {
ExitPrefix.ERROR: logging.ERROR,
ExitPrefix.HINT: logging.INFO,
ExitPrefix.NOTE: logging.INFO,
ExitPrefix.WARNING: logging.WARNING,
}
return mapping[self.prefix]
def to_lines(self, color: bool, width: int, with_prefix: bool) -> List[str]:
"""Output exit message to the console.
:param color: Whether to color the message
:param width: Constrain message to width
:param with_prefix: Whether to prefix the message
:returns: The exit message as a string
"""
prefix_length = ExitPrefix.longest_formatted()
indent = " " * prefix_length
message = textwrap.fill(
self.message,
width=width,
break_on_hyphens=False,
initial_indent=str(self.prefix) if with_prefix else indent,
subsequent_indent=indent,
)
printable = []
lines = message.splitlines()
start_color = self.color if color else ""
end_color = Color.END if color else ""
for line in lines:
printable.append(f"{start_color}{line}{end_color}")
return printable
@dataclass
class ExitMessages:
"""A mechanism to store multiple exit messages."""
messages: List[ExitMessage] = field(default_factory=list)
def to_strings(self, color: bool, width: int) -> List[str]:
"""Output exit messages to the console.
:param color: Whether to color the message
:param width: Constrain messages to width
:returns: The exit messages as a list of strings
"""
printable = []
new_section = True
for idx, message in enumerate(self.messages):
# Print the prefix if the next is different or a new section
if new_section:
# Use prefix, starting a new section
with_prefix = True
else:
try:
# Prefix if previous is different
with_prefix = self.messages[idx - 1].prefix != message.prefix
except IndexError:
# Last message
with_prefix = True
printable.extend(message.to_lines(color=color, with_prefix=with_prefix, width=width))
try:
next_prefix = self.messages[idx + 1].prefix
# Never break before a hint
if next_prefix is ExitPrefix.HINT:
new_section = False
continue
# Keep like items together
if message.prefix is next_prefix:
new_section = False
continue
# Start a new section
printable.append("")
new_section = True
except IndexError:
pass
return printable
class LogMessage(NamedTuple):
"""An object to hold a message destined for the logger."""
level: int
message: str
```
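A short sketch of how the exit-message classes above render: each message is wrapped to a width, prefixed, colored if requested, and hints stay attached to the message they follow.
```python
# Illustrative rendering of exit messages using the classes above.
messages = ExitMessages(
    messages=[
        ExitMessage(message="The settings file could not be loaded."),
        ExitMessage(message="Check the file for syntax errors.", prefix=ExitPrefix.HINT),
        ExitMessage(message="Continuing with default settings.", prefix=ExitPrefix.NOTE),
    ],
)
for line in messages.to_strings(color=False, width=72):
    print(line)
```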
#### File: ansible_navigator/utils/dot_paths.py
```python
import copy
import operator
from enum import Enum
from functools import reduce
from typing import Dict
from typing import List
from typing import MutableMapping
from typing import Tuple
from typing import Union
class MergeBehaviors(Enum):
"""The merge behaviors."""
LIST_LIST_EXTEND = "append list to list"
LIST_LIST_REPLACE = "replace list with list"
LIST_APPEND = "append to list"
LIST_REPLACE = "replace list"
LIST_SORT = "sort resulting list"
LIST_UNIQUE = "only unique values in resulting list"
DICT_DICT_UPDATE = "update left dict with right dict"
DICT_DICT_REPLACE = "replace left dict with right dict"
def get_with_path(content: MutableMapping, path: str):
"""Get a value from a path in a dictionary.
:param content: The content of the settings file
:param path: The path to the value
:return: The value at the path
"""
return reduce(operator.getitem, path.split("."), content)
def check_path(content: MutableMapping, path: str):
"""Check if a path exists in a dictionary.
:param content: The content of the settings file
:param path: The path to the value
:return: Whether the path exists
"""
try:
get_with_path(content, path)
return True
except (KeyError, TypeError):
return False
def delete_with_path(content: MutableMapping, path: str):
"""Delete a value from a path in a dictionary.
:param content: The content of the settings file
:param path: The path to the value
"""
parts = path.split(".")
del reduce(operator.getitem, parts[:-1], content)[parts[-1]]
def ascendants_from_path(path: str):
"""Get the ascendants of a path.
:param path: The path to the value
:return: The ascendants of the path
"""
parts = path.split(".")
return [path.rsplit(".", i)[0] for i in range(len(parts))]
def descendants_to_path(path: str):
"""Get the descendants to a path.
:param path: The path to the value
:return: The descendants to the path
"""
parts = path.split(".")
return [path.rsplit(".", i)[0] for i in reversed(range(len(parts)))]
def remove_and_delete_empty_ascendants(content: MutableMapping, path: str):
"""Remove and delete empty ascendants.
:param content: The content of the settings file
:param path: The path to the value
"""
ascendants = ascendants_from_path(path)
delete_with_path(content, ascendants.pop(0))
while ascendants:
ascendant = ascendants.pop(0)
branch_value = get_with_path(content, ascendant)
if branch_value == {}:
delete_with_path(content, ascendant)
else:
break
def place_at_path(
behaviors: Tuple[MergeBehaviors, ...],
content: Dict,
path: str,
value: Union[bool, int, list, float, str, List, Dict],
) -> Dict:
"""Place a value at a path in a dictionary.
:param behaviors: The merge behaviors
:param content: The content of the settings file
:param path: The path to the value
:param value: The value to place
:raises ValueError: If something can't be done
:return: The updated content
"""
# pylint: disable=too-many-branches
if (
MergeBehaviors.DICT_DICT_REPLACE in behaviors
and MergeBehaviors.DICT_DICT_UPDATE in behaviors
):
raise ValueError("Can't use both DICT_DICT_REPLACE and DICT_DICT_UPDATE behaviors")
if (
MergeBehaviors.LIST_LIST_EXTEND in behaviors
and MergeBehaviors.LIST_LIST_REPLACE in behaviors
):
raise ValueError("Can't use both LIST_LIST_EXTEND and LIST_LIST_REPLACE behaviors")
copied_content = copy.deepcopy(content)
nested = copied_content
if path in ("", None):
if isinstance(value, dict):
if MergeBehaviors.DICT_DICT_REPLACE in behaviors:
return value
if MergeBehaviors.DICT_DICT_UPDATE in behaviors:
return {**nested, **value}
raise ValueError("Cannot place non dict at root of dict")
for part in path.split("."):
if part == path.rsplit(".", maxsplit=1)[-1]:
if isinstance(nested.get(part), list):
if isinstance(value, list):
if MergeBehaviors.LIST_LIST_EXTEND in behaviors:
nested[part].extend(value)
elif MergeBehaviors.LIST_LIST_REPLACE in behaviors:
nested[part] = value
else:
raise ValueError("No behavior specified for LIST_LIST")
else:
if MergeBehaviors.LIST_APPEND in behaviors:
nested[part].append(value)
elif MergeBehaviors.LIST_REPLACE in behaviors:
nested[part] = value
continue
else:
raise ValueError("No behavior specified for LIST_*")
if MergeBehaviors.LIST_UNIQUE in behaviors:
nested[part] = list(dict.fromkeys(nested[part]))
if MergeBehaviors.LIST_SORT in behaviors:
nested[part].sort()
continue
if isinstance(nested.get(part), dict):
if isinstance(value, dict):
if MergeBehaviors.DICT_DICT_UPDATE in behaviors:
nested[part].update(value)
elif MergeBehaviors.DICT_DICT_REPLACE in behaviors:
nested[part] = value
else:
raise ValueError("No behavior specified for DICT_DICT")
continue
nested[part] = value
elif part not in nested:
nested[part] = {}
nested = nested[part]
return copied_content
def move_to_path(
behaviors: Tuple[MergeBehaviors, ...],
content: Dict,
new_path: str,
old_path: str,
) -> Dict:
"""Move a value to a path in a dictionary.
:param behaviors: The merge behaviors
:param content: The content of the settings file
:param old_path: The path to the value
:param new_path: The path to the value
:return: The updated content
"""
copied_content = copy.deepcopy(content)
if new_path == old_path:
return copied_content
value = get_with_path(content=copied_content, path=old_path)
delete_with_path(content=copied_content, path=old_path)
updated_content = place_at_path(
content=copied_content,
path=new_path,
value=value,
behaviors=behaviors,
)
return updated_content
```
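A compact, illustrative run of the dotted-path helpers above against a nested dictionary (the keys are placeholders); `place_at_path` only needs merge behaviors when lists or dicts collide, so an empty tuple is enough for scalar values.
```python
# Illustrative use of the dotted-path helpers defined above.
content = {"ansible-navigator": {"editor": {"command": "vim"}}}

print(get_with_path(content, "ansible-navigator.editor.command"))  # vim
print(check_path(content, "ansible-navigator.missing"))            # False

updated = place_at_path(
    behaviors=(),  # no list/dict merge involved for a scalar value
    content=content,
    path="ansible-navigator.editor.console",
    value=True,
)
print(updated["ansible-navigator"]["editor"])  # {'command': 'vim', 'console': True}

moved = move_to_path(
    behaviors=(),
    content=updated,
    new_path="ansible-navigator.app.editor",
    old_path="ansible-navigator.editor",
)
print(moved["ansible-navigator"]["app"]["editor"]["command"])  # vim
```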
#### File: configuration_subsystem/post_processors/test_time_zone.py
```python
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict
from typing import Optional
from typing import Union
import pytest
from ansible_navigator.configuration_subsystem import Constants as C
from ansible_navigator.configuration_subsystem import NavigatorConfiguration
from ansible_navigator.configuration_subsystem.configurator import Configurator
from ansible_navigator.configuration_subsystem.navigator_post_processor import (
NavigatorPostProcessor,
)
@dataclass
class Scenario:
"""Data structure for the time zone post processor tests."""
current: Union[bool, str, Dict]
source: C
exit_message_substr: str = ""
expected: Optional[str] = None
index: int = 0
def __post_init__(self):
"""Set the expected if errors are expected."""
if self.expected is None:
object.__setattr__(self, "expected", self.current)
def __str__(self):
"""Provide a test id.
:returns: The test id
"""
return f"{self.source}-{self.current}"
test_data = (
Scenario(
current="foo",
exit_message_substr=(
"The specified time zone 'foo', set by environment variable, could not be found."
),
source=C.ENVIRONMENT_VARIABLE,
),
Scenario(
current="Japan",
source=C.ENVIRONMENT_VARIABLE,
),
Scenario(
current="local",
source=C.ENVIRONMENT_VARIABLE,
),
Scenario(
current={},
exit_message_substr=(
"The specified time zone '{}', set by settings file,"
" must be a string but was found to be a 'dict'."
),
source=C.USER_CFG,
),
Scenario(
current=True,
exit_message_substr=(
"The specified time zone 'True', set by settings file,"
" must be a string but was found to be a 'bool'."
),
source=C.USER_CFG,
),
Scenario(
current="foo",
source=C.USER_CFG,
exit_message_substr=(
"The specified time zone 'foo', set by settings file, could not be found."
),
),
Scenario(
current="Japan",
source=C.USER_CFG,
),
Scenario(
current="local",
source=C.USER_CFG,
),
Scenario(
current="foo",
exit_message_substr=(
"The specified time zone 'foo', set by command line, could not be found."
),
source=C.USER_CLI,
),
Scenario(
current="Japan",
source=C.USER_CLI,
),
Scenario(
current="local",
source=C.USER_CLI,
),
)
@pytest.mark.parametrize(argnames="data", argvalues=test_data, ids=str)
def test_pp_direct(data: Scenario):
"""Test the time zone post processor.
:param data: The test data
"""
settings = deepcopy(NavigatorConfiguration)
entry = settings.entry("time_zone")
entry.value.current = data.current
entry.value.source = data.source
_messages, exit_messages = NavigatorPostProcessor().time_zone(
entry=entry,
config=settings,
)
if data.exit_message_substr:
assert data.exit_message_substr in exit_messages[0].message
else:
assert entry.value.current == data.expected
env_var_test_data = [s for s in test_data if s.source is C.ENVIRONMENT_VARIABLE]
@pytest.mark.parametrize(argnames="data", argvalues=env_var_test_data, ids=str)
def test_env_var(monkeypatch: pytest.MonkeyPatch, data: Scenario):
"""Test the time zone post processor using the environment variable.
:param monkeypatch: The monkey patch fixture
:param data: The test data
"""
application_configuration = deepcopy(NavigatorConfiguration)
application_configuration.internals.initializing = True
configurator = Configurator(application_configuration=application_configuration, params=[])
monkeypatch.setenv("TZ", str(data.current))
_messages, exit_messages = configurator.configure()
if data.exit_message_substr:
assert data.exit_message_substr in exit_messages[2].message
else:
assert application_configuration.entry("time_zone").value.current == data.expected
```
#### File: unit/configuration_subsystem/test_json_schema.py
```python
from copy import deepcopy
from pathlib import Path
from typing import Any
from typing import Dict
import pytest
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from ansible_navigator.configuration_subsystem import NavigatorConfiguration
from ansible_navigator.configuration_subsystem import to_sample
from ansible_navigator.configuration_subsystem.definitions import SettingsSchemaType
from ansible_navigator.utils.serialize import Loader
from ansible_navigator.utils.serialize import yaml
from .defaults import TEST_FIXTURE_DIR
def test_basic(schema_dict: SettingsSchemaType):
"""Simple test to ensure an exception isn't raised.
:param schema_dict: The json schema as a dictionary
"""
assert schema_dict["$schema"] == "http://json-schema.org/draft-07/schema"
assert isinstance(schema_dict, dict)
assert isinstance(schema_dict["properties"], dict)
assert isinstance(schema_dict["properties"]["ansible-navigator"], dict)
assert isinstance(schema_dict["properties"]["ansible-navigator"]["properties"], dict)
# This checks for a number of root keys in the settings file
assert len(schema_dict["properties"]["ansible-navigator"]["properties"]) >= 15
def test_additional_properties(schema_dict: SettingsSchemaType):
"""Ensure additional properties are forbidden throughout the schema.
:param schema_dict: The json schema as a dictionary
"""
def property_dive(subschema: SettingsSchemaType):
if "properties" in subschema:
assert subschema["additionalProperties"] is False
for value in subschema["properties"].values():
property_dive(subschema=value)
property_dive(schema_dict)
def test_no_extras(schema_dict: SettingsSchemaType):
"""Ensure no extras exist in either settings or schema.
:param schema_dict: The json schema as a dictionary
"""
settings = deepcopy(NavigatorConfiguration)
all_paths = [
setting.settings_file_path(prefix=settings.application_name_dashed)
for setting in settings.entries
]
json_paths = []
def dive(subschema, path=""):
if "properties" in subschema:
for name, prop in subschema["properties"].items():
if path:
dive(prop, f"{path}.{name}")
else:
dive(prop, name)
else:
json_paths.append(path)
dive(schema_dict)
assert sorted(all_paths) == sorted(json_paths)
def test_schema_sample_full_tests(schema_dict: SettingsSchemaType):
"""Check the full settings file against the schema.
:param schema_dict: The json schema as a dictionary
"""
settings_file = Path(TEST_FIXTURE_DIR, "ansible-navigator.yml")
with settings_file.open(encoding="utf-8") as fh:
settings_contents = yaml.load(fh, Loader=Loader)
validate(instance=settings_contents, schema=schema_dict)
def test_schema_sample_full_package_data(schema_dict: SettingsSchemaType):
"""Check the settings file used as a sample against the schema.
:param schema_dict: The json schema as a dictionary
"""
settings = deepcopy(NavigatorConfiguration)
commented, uncommented = to_sample(settings=settings)
settings_dict = yaml.load(commented, Loader=Loader)
validate(instance=settings_dict, schema=schema_dict)
settings_dict = yaml.load(uncommented, Loader=Loader)
validate(instance=settings_dict, schema=schema_dict)
def test_schema_sample_wrong(schema_dict: SettingsSchemaType):
"""Check the broken settings file against the schema.
:param schema_dict: The json schema as a dictionary
"""
settings_file = Path(TEST_FIXTURE_DIR, "ansible-navigator_no_app.yml")
with settings_file.open(encoding="utf-8") as fh:
settings_contents = yaml.load(fh, Loader=Loader)
with pytest.raises(ValidationError) as exc:
validate(instance=settings_contents, schema=schema_dict)
assert "'non_app' is not one of ['builder'" in str(exc)
def test_schema_dict_all_required(
schema_dict_all_required: SettingsSchemaType,
):
"""Confirm every entry in the schema has required.
:param schema_dict_all_required: The json schema as a dictionary, everything required
"""
def property_dive(subschema: Dict[str, Any]):
if "properties" in subschema:
assert subschema["required"] == list(subschema["properties"].keys())
for value in subschema["properties"].values():
property_dive(subschema=value)
property_dive(schema_dict_all_required)
``` |
{
"source": "JohnLieske/ansible-onepasswordconnect-collection",
"score": 2
} |
#### File: plugins/modules/generic_item.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
module: generic_item
author:
- 1Password (@1Password)
requirements: []
notes:
short_description: Creates a customizable 1Password Item
description:
- Create or update an Item in a Vault.
- Fully customizable using the Fields option.
- B(NOTE) Any item fields without C(label) are removed when updating an existing item.
options:
vault_id:
type: str
required: True
description:
- ID of the 1Password vault that will be accessed.
- Uses environment variable C(OP_VAULT_ID) if not explicitly defined in the playbook.
name:
type: str
aliases:
- title
description:
- Name of the Item
- If C(state) is C(present) and C(uuid) is defined, the given value will overwrite the previous Item name
- If C(state) is C(present) and C(uuid) is NOT defined, the module will try to find an item with the same name.
If an item cannot be found, a new item with the given name is created and the old item is not deleted.
uuid:
type: str
description:
- Unique ID for a single Item.
- Ignored if C(state) is C(present) and the item doesn't exist.
- If C(state) is C(present) and C(uuid) is NOT defined, the module will try to find an item using C(name).
If an item cannot be found, a new item is created with the C(name) value and the old item is not changed.
category:
type: str
default: api_credential
description:
- >
Applies the selected category template to the item. Other 1Password clients use category templates to help
organize fields when rendering an item.
- >
The category cannot be changed after creating an item.
To change the category, recreate the item with the new category
- >
If the category is C(login) or C(password) and the item has a field named C(password),
that field will be the primary password when the item is displayed in 1Password clients.
- >
If the category is C(login) and the item has a field named C(username),
that field becomes the primary username when the item is displayed in 1Password clients.
choices:
- login
- password
- server
- database
- api_credential
- software_license
- secure_note
- wireless_router
- bank_account
- email_account
- credit_card
- membership
- passport
- outdoor_license
- driver_license
- identity
- reward_program
- social_security_number
urls:
type: list
elements: str
description:
- Store one or more URLs on an item
- URLs are clickable in the 1Password UI
favorite:
type: bool
default: false
description: Toggles the 'favorite' attribute for an Item
fields:
description: List of fields associated with the Item
type: list
elements: dict
suboptions:
label:
type: str
required: true
description: The name of the field
value:
type: str
description: Sets the value of the field.
section:
type: str
description:
- Places the field into a named group. If section does not exist, it is created.
- If two or more fields belong to the same C(section), they are grouped together under that section.
field_type:
type: str
default: string
aliases:
- type
description:
- Sets expected value type for the field.
- >
If C(generic_item.category) is C(login) or C(password), the field with type C(concealed) and
named C(password) becomes the item's primary password.
choices:
- string
- email
- concealed
- url
- otp
- date
- month_year
generate_value:
type: str
default: 'never'
choices: ['always', 'on_create', 'never']
description:
- Generate a new value for the field using the C(generator_recipe).
- Overrides C(value) if I(generate_value=on_create) and field does not exist or if I(generate_value=always).
- I(generate_value=never) will use the data in C(value).
- I(generate_value=always) will assign a new value to this field every time Ansible runs the module.
- I(generate_value=on_create) will generate a new value and ignore C(value) if the field does not exist.
If the field does exist, the module will use the previously generated value and ignore
the C(value).
- The module searches for field by using a case-insensitive match for the C(label)
within the field's C(section).
generator_recipe:
type: dict
description:
- Configures 1Password's Secure Password Generator
- If C(generate_value) is 'never', these options have no effect.
suboptions:
length:
type: int
default: 32
description:
- Defines number of characters in generated password
include_digits:
type: bool
default: true
description:
- Toggle whether generated password includes digits (0-9)
include_letters:
type: bool
default: true
description:
- Toggle whether generated password includes ASCII characters (a-zA-Z)
include_symbols:
type: bool
default: true
description:
- Toggle whether generated password includes ASCII symbol characters
extends_documentation_fragment:
- onepassword.connect.item_tags
- onepassword.connect.item_state
- onepassword.connect.api_params
'''
EXAMPLES = '''
- name: Create an Item with no fields
onepassword.connect.generic_item:
title: Example Item
state: present
- name: Create an item and generate its value if the item does not exist.
onepassword.connect.generic_item:
title: Club Membership
state: present
fields:
- label: Secret Code
field_type: concealed
generate_value: on_create
generator_recipe:
length: 16
include_letters: yes
include_digits: yes
include_symbols: no
section: Club Card Details
register: op_item # Access item values through `op_item['data']`
no_log: true # Hide the output - it will contain the secret value you just stored
- name: Update an item while preserving the generated Secret Code value
onepassword.connect.generic_item:
title: Club Membership
state: present
fields:
- label: Secret Code
field_type: concealed
overwrite: no
generate_value: never
generator_recipe: # ignored because generate_value == never
length: 16
include_letters: yes
include_digits: yes
include_symbols: no
section: Club Card Details
no_log: true
- name: Change an Item's Name and leave the generated Secret Code value unchanged
onepassword.connect.generic_item:
title: Guild Membership Details
uuid: 3igj89sdf9ssdf89g
state: present
fields:
- label: Secret Code
field_type: concealed
overwrite: no
generate_value: on_create
generator_recipe: # ignored because generate_value == never
length: 16
include_letters: yes
include_digits: yes
include_symbols: no
section: Club Card Details
no_log: true
- name: Delete an Item by its Item UUID
onepassword.connect.generic_item:
uuid: 3igj89sdf9ssdf89g
state: absent
no_log: true
- name: Delete an Item by its name
onepassword.connect.generic_item:
title: Club Membership
state: absent
no_log: true
'''
RETURN = '''
op_item:
description: >
Dictionary containing Item properties or an empty dictionary if I(state=absent).
See 1Password Connect API for complete structure.
type: complex
returned: always
contains:
category:
description: The Item template used when creating or modifying the item
returned: success
type: str
sample: LOGIN
created_at:
description: Timestamp that reports when the Item was originally created
returned: success
type: str
sample: "2020-11-23T15:29:07.312397-08:00"
updated_at:
description: Timestamp that reports when the Item was last modified.
returned: success
type: str
sample: "2020-11-23T15:29:07.312397-08:00"
id:
description: Unique ID for the Item.
returned: success
type: str
sample: "bactwEXAMPLEpxhpjxymh7yy"
tags:
description: All unique tag values associated with the item
type: list
elements: str
returned: success
sample:
- tag1
- tag2
title:
description: User-provided name for the Item. Displayed in 1Password clients.
type: str
returned: success
sample: My Test Item
vault:
description: Information about the Vault containing this Item.
type: dict
returned: success
sample:
- id: abc1234EXAMPLEvault5678
fields:
type: dict
description: Lists all defined fields for the Item. The key for each field is the field's label.
returned: success
sample: {"ExampleField": {"id": "123example", "label": "Test", "type": "STRING", "value": "exampleValue"}}
msg:
description: Information returned when an error occurs.
type: str
returned: failure
sample: Invalid Vault ID
'''
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.onepassword.connect.plugins.module_utils import specs, api, vault, errors
def main():
# Name always required when creating a new Item
required_if = [
("state", "present", ("name",))
]
module = AnsibleModule(
argument_spec=specs.op_item(),
supports_check_mode=True,
required_if=required_if
)
results = {"op_item": {}, "changed": False}
changed = False
api_response = {}
try:
api_client = api.create_client(module)
state = module.params["state"].lower()
item = vault.find_item(module.params, api_client)
if state == "absent":
changed, api_response = vault.delete_item(
item,
api_client,
check_mode=module.check_mode
)
else:
if not item:
changed, api_response = vault.create_item(
module.params,
api_client,
check_mode=module.check_mode
)
else:
changed, api_response = vault.update_item(
module.params,
item,
api_client,
check_mode=module.check_mode
)
except TypeError as e:
results.update({"msg": to_native("Invalid Item config: {err}".format(err=e))})
module.fail_json(**results)
except errors.Error as e:
results.update({"msg": to_native(e.message)})
module.fail_json(**results)
results.update({"op_item": api_response, "changed": bool(changed)})
module.exit_json(**results)
if __name__ == '__main__':
main()
```
#### File: plugins/module_utils/api.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import base64
import sys
import re
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import urlencode, quote, urlunparse, urlparse
from ansible_collections.onepassword.connect.plugins.module_utils import errors, const
def create_client(module):
if not module.params.get("hostname") or not module.params.get("token"):
raise errors.AccessDeniedError(message="Server hostname or auth token not defined")
return OnePassword(
hostname=module.params["hostname"],
token=module.params["token"],
module=module
)
class OnePassword:
API_VERSION = "v1"
def __init__(self, hostname, token, module):
self.hostname = hostname
self.token = token
self._module = module
self._user_agent = _format_user_agent(
const.COLLECTION_VERSION,
python_version=".".join(str(i) for i in sys.version_info[:3]),
ansible_version=self._module.ansible_version
)
def _send_request(self, path, method="GET", data=None, params=None):
fetch_kwargs = {
"url": build_endpoint(self.hostname, path, params=params, api_version=self.API_VERSION),
"method": method,
"headers": self._build_headers(),
}
if method.upper() in ["POST", "PUT", "PATCH"]:
fetch_kwargs["data"] = self._module.jsonify(data)
response_body = {}
resp, info = fetch_url(self._module, **fetch_kwargs)
if resp:
try:
response_body = json.loads(resp.read().decode("utf-8"))
except (AttributeError, ValueError):
if info.get("status") == 204:
# No Content response
return {}
msg = "Server returned error with invalid JSON: {err}".format(
err=info.get("msg", "<Undefined error>")
)
return self._module.fail_json(msg=msg)
else:
raise_for_error(info)
return response_body
def _build_headers(self):
return {
"Authorization": "Bearer {token}".format(token=self.token),
"User-Agent": self._user_agent,
"Content-Type": "application/json",
"Accept": "application/json"
}
def get_item_by_id(self, vault_id, item_id):
path = "/vaults/{vault_id}/items/{item_id}".format(vault_id=vault_id, item_id=item_id)
return self._send_request(path)
def get_item_by_name(self, vault_id, item_name):
try:
item = self._get_item_id_by_name(vault_id, item_name)
item_id = item["id"]
except KeyError:
raise errors.NotFoundError
return self.get_item_by_id(vault_id, item_id)
def create_item(self, vault_id, item):
path = "/vaults/{vault_id}/items".format(vault_id=vault_id)
return self._send_request(path, method="POST", data=item)
def update_item(self, vault_id, item):
path = "/vaults/{vault_id}/items/{item_id}".format(vault_id=item["vault"]["id"], item_id=item["id"])
return self._send_request(path, method="PUT", data=item)
def delete_item(self, vault_id, item_id):
path = "/vaults/{vault_id}/items/{item_id}".format(vault_id=vault_id, item_id=item_id)
return self._send_request(path, method="DELETE")
def get_vaults(self):
path = "/vaults"
return self._send_request(path)
def get_vault_id_by_name(self, vault_name):
"""Find the vault ID associated with the given vault name
Loops through the results of the 'GET vault' query and tries
to do an exact match with each vault name that is returned.
:param vault_name: Name of the requested vault
: return: str
"""
resp = self.get_vaults()
for vault in resp:
if vault["name"] == vault_name:
return vault["id"]
raise errors.NotFoundError
def _get_item_id_by_name(self, vault_id, item_name):
"""Find the Item ID associated with the given Item Name
Loops through results of SCIM-style query and tries to
do an exact match with each returned Item Title and the given Item title
:param vault_id:
:param item_name: Title parameter of the requested Item
:return: str
"""
query_filter = {"filter": 'title eq "{item_name}"'.format(item_name=item_name)}
path = "/vaults/{vault_id}/items".format(vault_id=vault_id)
resp = self._send_request(path, params=query_filter)
if not resp:
raise errors.NotFoundError
if len(resp) > 1:
raise errors.APIError(
message="More than 1 match found for an Item with that name. Please adjust your search query."
)
return resp[0]
def build_endpoint(hostname, path, params=None, api_version=None):
url_parts = list(urlparse(hostname))
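    # urlparse returns a 6-part tuple; index 2 is the path and index 4 the query string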
if not api_version:
api_version = OnePassword.API_VERSION
# Path _may_ have a space in it if client passes item name, for example
url_parts[2] = "{api_version}/{path}".format(
api_version=api_version,
path=quote(path.strip('/'))
)
if params:
url_parts[4] = urlencode(params)
return urlunparse(url_parts)
def raise_for_error(response_info):
try:
response_info_body = json.loads(response_info.get("body").decode("utf-8"))
err_details = {
"message": response_info_body.get("message"),
"status_code": response_info_body.get("status")
}
except (AttributeError, ValueError):
# `body` key not present if urllib throws an error ansible doesn't handle
err_details = {
"message": response_info.get("msg", "Error not defined"),
"status_code": response_info.get("status")
}
if err_details["status_code"] >= 500:
raise errors.ServerError(**err_details)
elif err_details["status_code"] == 404:
raise errors.NotFoundError(**err_details)
elif err_details["status_code"] in [401, 403]:
raise errors.AccessDeniedError(**err_details)
elif err_details["status_code"] == 400:
raise errors.BadRequestError(**err_details)
else:
raise errors.APIError(**err_details)
def _format_user_agent(collection_version, python_version=None, ansible_version=None):
return "op-connect-ansible/{version} Python/{py_version} Ansible/{ansible}".format(
version=collection_version,
py_version=python_version or "unknown",
ansible=ansible_version or "unknown"
)
# Client UUIDs must be exactly 26 characters.
CLIENT_UUID_LENGTH = 26
def valid_client_uuid(uuid):
"""Checks whether a given UUID meets the client UUID spec"""
    # triple braces: the outer {{ }} escape to literal braces, so the f-string yields the regex quantifier {26}
return re.match(rf"^[0-9a-z]{{{CLIENT_UUID_LENGTH}}}$", uuid) is not None
def create_client_uuid():
"""Creates a valid client UUID.
The UUID is not intended to be cryptographically random."""
rand_bytes = os.urandom(16)
base32_utf8 = base64.b32encode(rand_bytes).decode("utf-8")
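    # 16 random bytes encode to 26 base32 characters once padding is stripped,
    # which matches CLIENT_UUID_LENGTH above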
return base32_utf8.rstrip("=").lower()
```
#### File: plugins/module_utils/test_item_crud.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.onepassword.connect.plugins.module_utils import vault, const
def test_create_item(mocker):
params = {
"vault_id": "Abc123",
"category": const.ItemType.API_CREDENTIAL,
"name": "My New Item",
"favorite": True
}
mock_item = dict(params)
mock_item["id"] = "abc123xyz9325"
mock_api = mocker.Mock()
mock_api.create_item.return_value = mock_item
modified, new_item = vault.create_item(params, mock_api)
assert modified is True
mock_api.create_item.assert_called_once()
def test_check_mode(mocker):
vault_id = "Abc123"
mock_api = mocker.Mock()
create_item_params = {
"vault_id": vault_id,
"category": const.ItemType.API_CREDENTIAL,
"name": "My New Item",
"favorite": True
}
modified, item = vault.create_item(create_item_params, mock_api, check_mode=True)
assert modified
assert mock_api.create_item.mock_calls == []
item["vault"] = {"id": create_item_params["vault_id"]}
item["id"] = "987654321"
update_params = dict(create_item_params)
update_params.update({"vault_id": vault_id, "name": "UPDATED Title"})
modified, updated_item = vault.update_item(update_params, item, mock_api, check_mode=True)
assert modified
assert mock_api.update_item.mock_calls == []
assert updated_item["title"] == update_params["name"]
modified, delete_response = vault.delete_item(updated_item, mock_api, check_mode=True)
assert modified
assert delete_response == {}
assert mock_api.delete_item.mock_calls == []
def test_delete_item(mocker):
item = {
"id": "xyz9876",
"vault": {"id": "ABC123"},
"category": const.ItemType.PASSWORD,
"title": "My New Item"
}
mock_api = mocker.Mock()
modified, new_item = vault.delete_item(item, mock_api)
assert modified is True
mock_api.delete_item.assert_called_once_with(item["vault"]["id"], item_id=item["id"])
def test_delete_item_when_does_not_exist(mocker):
mock_api = mocker.Mock()
non_existent_item = None
modified, resp = vault.delete_item(non_existent_item, mock_api)
assert modified is False
assert not mock_api.delete_item.called
def test_update_item(mocker):
vault_id = "Abc123"
original_item = {
"id": "XYZ123def456",
"vault": {"id": vault_id},
"title": "BEFORE_NAME",
"category": const.ItemType.PASSWORD,
"favorite": True
}
params = {
"favorite": False,
"title": "AFTER_NAME",
"vault_id": vault_id,
"category": const.ItemType.PASSWORD,
"value": "<PASSWORD>",
"fields": [
{
"label": "Password",
"value": "<PASSWORD>",
"field_type": const.FieldType.CONCEALED
}
]
}
mock_api = mocker.Mock()
mock_api.update_item.return_value = {
"id": original_item["id"],
"vault": original_item["vault"],
"title": params["title"],
"fields": params["fields"],
"category": params["category"],
"favorite": params["favorite"]
}
modified, updated_item = vault.update_item(params, original_item, mock_api)
assert modified is True
assert updated_item["favorite"] == params["favorite"]
assert updated_item["title"] == params["title"]
assert updated_item["vault"]["id"] == original_item["vault"]["id"]
``` |
{
"source": "johnlime/cleanrl",
"score": 3
} |
#### File: cleanrl/experiments/multiprocessing_cuda1.py
```python
import torch
import nvidia_smi
from torch import multiprocessing as mp
# Receiver
def receiver(x):
x[0] = 200
# print(x)
if __name__ == '__main__':
ctx = mp.get_context("fork")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
x = torch.arange(10).reshape(5, 2)
x.share_memory_()
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(x.data_ptr(), x.numel() * x.element_size(), 0)
assert x.is_shared()
assert x.is_pinned()
g = x.numpy()
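    # numpy view over the tensor's shared storage; writes from the forked child
    # land in the same buffer the parent sees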
procs = [
ctx.Process(target=receiver, args=(g,)) for _ in range(1)
]
    for p in procs: p.start()
    # wait for the child's write to the shared buffer before the interpreter exits
    for p in procs: p.join()
``` |
{
"source": "johnlinp/falcon-raml",
"score": 3
} |
#### File: tests/headers/test.py
```python
import falcon
import falcon.testing
from tests.headers import app
class TestHeadersRequiredString(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/required/string'
self.header_name = 'X-Required-String'
self.header_values = [
'hello',
r'!@#$%^&*(){}',
]
def test_success(self):
for value in self.header_values:
response = self.simulate_get(self.path, headers={
self.header_name: value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_missing_header(self):
response = self.simulate_get(self.path)
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing header value')
self.assertEqual(
response.json['description'],
'The {} header is required.'.format(self.header_name)
)
def test_wrong_header(self):
response = self.simulate_get(self.path, headers={
'X-Wrong-Header': self.header_values[0],
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing header value')
self.assertEqual(
response.json['description'],
'The {} header is required.'.format(self.header_name)
)
def test_with_empty_string(self):
response = self.simulate_get(self.path, headers={
self.header_name: '',
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_with_number(self):
response = self.simulate_get(self.path, headers={
self.header_name: '42',
})
# number strings are strings too
self.assertEqual(response.status, falcon.HTTP_200)
class TestHeadersRequiredNumber(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/required/number'
self.header_name = 'X-Required-Number'
self.header_values = [
'55.66',
'42',
'0',
'-55.66',
]
def test_success(self):
for value in self.header_values:
response = self.simulate_get(self.path, headers={
self.header_name: value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_missing_header(self):
response = self.simulate_get(self.path)
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing header value')
self.assertEqual(
response.json['description'],
'The {} header is required.'.format(self.header_name)
)
def test_not_numbers(self):
wrong_values = [
'hello',
'a123',
'123a',
]
for value in wrong_values:
response = self.simulate_get(self.path, headers={
self.header_name: value,
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Invalid header value')
self.assertEqual(
response.json['description'],
'The value provided for the {} header is invalid. '
'Should be a number.'.format(self.header_name)
)
class TestHeadersRequiredInteger(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/required/integer'
self.header_name = 'X-Required-Integer'
self.header_values = [
'42',
'0',
'-42',
]
def test_success(self):
for value in self.header_values:
response = self.simulate_get(self.path, headers={
self.header_name: value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_missing_header(self):
response = self.simulate_get(self.path)
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing header value')
self.assertEqual(
response.json['description'],
'The {} header is required.'.format(self.header_name)
)
def test_not_integers(self):
wrong_values = [
'hello',
'55.66',
]
for value in wrong_values:
response = self.simulate_get(self.path, headers={
self.header_name: value,
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Invalid header value')
self.assertEqual(
response.json['description'],
'The value provided for the {} header is invalid. '
'Should be an integer.'.format(self.header_name)
)
class TestHeadersRequiredBoolean(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/required/boolean'
self.header_name = 'X-Required-Boolean'
self.header_values = [
'true',
'false',
]
def test_success(self):
for value in self.header_values:
response = self.simulate_get(self.path, headers={
self.header_name: value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_missing_header(self):
response = self.simulate_get(self.path)
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing header value')
self.assertEqual(
response.json['description'],
'The {} header is required.'.format(self.header_name)
)
def test_not_booleans(self):
wrong_values = [
'hello',
'TRUE',
'FALSE',
'yes',
'no',
]
for value in wrong_values:
response = self.simulate_get(self.path, headers={
self.header_name: value,
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Invalid header value')
self.assertEqual(
response.json['description'],
'The value provided for the {} header is invalid. '
'Should be "true" or "false".'.format(self.header_name)
)
class TestHeadersNonRequiredMixed(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/nonrequired/mixed'
self.string_header_name = 'X-Non-Required-String'
self.number_header_name = 'X-Non-Required-Number'
self.integer_header_name = 'X-Non-Required-Integer'
self.boolean_header_name = 'X-Non-Required-Boolean'
self.string_header_value = 'hello'
self.number_header_value = '55.66'
self.integer_header_value = '42'
self.boolean_header_value = 'true'
def test_no_headers(self):
response = self.simulate_get(self.path, headers={})
self.assertEqual(response.status, falcon.HTTP_200)
def test_all_headers(self):
response = self.simulate_get(self.path, headers={
self.string_header_name: self.string_header_value,
self.number_header_name: self.number_header_value,
self.integer_header_name: self.integer_header_value,
self.boolean_header_name: self.boolean_header_value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_only_string(self):
response = self.simulate_get(self.path, headers={
self.string_header_name: self.string_header_value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_wrong_number(self):
response = self.simulate_get(self.path, headers={
self.string_header_name: self.string_header_value,
self.number_header_name: 'hey',
self.integer_header_name: self.integer_header_value,
self.boolean_header_name: self.boolean_header_value,
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Invalid header value')
self.assertEqual(
response.json['description'],
'The value provided for the {} header is invalid. '
'Should be a number.'.format(self.number_header_name)
)
```
#### File: tests/jsonbody/app.py
```python
import falcon
import falconraml
class Dummy(object):
def on_post(self, request, response):
pass
api = falcon.API(middleware=[
falconraml.JsonTranslator(),
falconraml.ParameterChecker('test/tests/jsonbody/spec.raml'),
])
api.add_route('/simple/string', Dummy())
api.add_route('/malformed/schema', Dummy())
```
#### File: tests/nojsontranslator/app.py
```python
import falcon
import falconraml
class Dummy(object):
def on_post(self, request, response):
pass
api = falcon.API(middleware=[
falconraml.ParameterChecker('test/tests/nojsontranslator/spec.raml'),
])
api.add_route('/basic/string', Dummy())
```
#### File: tests/queryparams/test.py
```python
import falcon
import falcon.testing
from tests.queryparams import app
class TestQueryParamRequiredString(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/required/string'
self.query_param_name = 'required_string'
self.query_param_values = [
'hello',
r'!@#$%^&*(){}',
]
def test_success(self):
for value in self.query_param_values:
response = self.simulate_get(self.path, params={
self.query_param_name: value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_missing_query_params(self):
response = self.simulate_get(self.path)
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is required.'.format(self.query_param_name)
)
def test_wrong_param(self):
response = self.simulate_get(self.path, params={
'wrong_param': self.query_param_values[0],
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is required.'.format(self.query_param_name)
)
def test_with_empty_string(self):
response = self.simulate_get(self.path, params={
self.query_param_name: '',
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is required.'.format(self.query_param_name)
)
def test_with_number(self):
response = self.simulate_get(self.path, params={
self.query_param_name: '42',
})
# number strings are strings too
self.assertEqual(response.status, falcon.HTTP_200)
class TestQueryParamsRequiredNumber(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/required/number'
self.query_param_name = 'required_number'
self.query_param_values = [
'55.66',
'42',
'0',
'-55.66',
]
def test_success(self):
for value in self.query_param_values:
response = self.simulate_get(self.path, params={
self.query_param_name: value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_missing_query_params(self):
response = self.simulate_get(self.path)
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is required.'.format(self.query_param_name)
)
def test_not_numbers(self):
wrong_values = [
'hello',
'a123',
'123a',
]
for value in wrong_values:
response = self.simulate_get(self.path, params={
self.query_param_name: value,
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Invalid parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is invalid. '
'Should be a number.'.format(self.query_param_name)
)
class TestQueryParamsRequiredInteger(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/required/integer'
self.query_param_name = 'required_integer'
self.query_param_values = [
'42',
'0',
'-42',
]
def test_success(self):
for value in self.query_param_values:
response = self.simulate_get(self.path, params={
self.query_param_name: value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_missing_query_params(self):
response = self.simulate_get(self.path)
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is required.'.format(self.query_param_name)
)
def test_not_integers(self):
wrong_values = [
'hello',
'55.66',
]
for value in wrong_values:
response = self.simulate_get(self.path, params={
self.query_param_name: value,
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Invalid parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is invalid. '
'Should be an integer.'.format(self.query_param_name)
)
class TestQueryParamsRequiredBoolean(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/required/boolean'
self.query_param_name = 'required_boolean'
self.query_param_values = [
'true',
'false',
]
def test_success(self):
for value in self.query_param_values:
response = self.simulate_get(self.path, params={
self.query_param_name: value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_missing_query_params(self):
response = self.simulate_get(self.path)
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is required.'.format(self.query_param_name)
)
def test_not_booleans(self):
wrong_values = [
'hello',
'TRUE',
'FALSE',
'yes',
'no',
]
for value in wrong_values:
response = self.simulate_get(self.path, params={
self.query_param_name: value,
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Invalid parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is invalid. '
'Should be "true" or "false".'.format(self.query_param_name)
)
class TestQueryParamsNonRequiredMixed(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/nonrequired/mixed'
self.string_query_param_name = 'non_required_string'
self.number_query_param_name = 'non_required_number'
self.integer_query_param_name = 'non_required_integer'
self.boolean_query_param_name = 'non_required_boolean'
self.string_query_param_value = 'hello'
self.number_query_param_value = '55.66'
self.integer_query_param_value = '42'
self.boolean_query_param_value = 'true'
def test_no_params(self):
response = self.simulate_get(self.path, params={})
self.assertEqual(response.status, falcon.HTTP_200)
def test_all_params(self):
response = self.simulate_get(self.path, params={
self.string_query_param_name: self.string_query_param_value,
self.number_query_param_name: self.number_query_param_value,
self.integer_query_param_name: self.integer_query_param_value,
self.boolean_query_param_name: self.boolean_query_param_value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_only_string(self):
response = self.simulate_get(self.path, params={
self.string_query_param_name: self.string_query_param_value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_wrong_number(self):
response = self.simulate_get(self.path, params={
self.string_query_param_name: self.string_query_param_value,
self.number_query_param_name: 'hey',
self.integer_query_param_name: self.integer_query_param_value,
self.boolean_query_param_name: self.boolean_query_param_value,
})
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Invalid parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is invalid. '
'Should be a number.'.format(self.number_query_param_name)
)
```
#### File: tests/templatedpath/app.py
```python
import falcon
import falconraml
class Dummy(object):
def on_get(self, request, response, some_id):
pass
api = falcon.API(middleware=[
falconraml.ParameterChecker('test/tests/templatedpath/spec.raml'),
])
api.add_route('/templated/{some_id}', Dummy())
```
#### File: tests/templatedpath/test.py
```python
import falcon
import falcon.testing
from tests.templatedpath import app
class TestTemplatedPath(falcon.testing.TestCase):
def setUp(self):
self.app = app.api
self.path = '/templated/9527'
self.query_param_name = 'required_string'
self.query_param_value = 'hello'
def test_success(self):
response = self.simulate_get(self.path, params={
self.query_param_name: self.query_param_value,
})
self.assertEqual(response.status, falcon.HTTP_200)
def test_missing_query_params(self):
response = self.simulate_get(self.path)
self.assertEqual(response.status, falcon.HTTP_400)
self.assertEqual(response.json['title'], 'Missing parameter')
self.assertEqual(
response.json['description'],
'The "{}" parameter is required.'.format(self.query_param_name)
)
``` |
{
"source": "johnlinp/telegram-good-timing-bot",
"score": 3
} |
#### File: goodtiming/core/database.py
```python
import os
import psycopg2
class DatabaseUniqueViolation(Exception):
pass
class Database:
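    # Thin psycopg2 helper: opens a connection per call, lets the connection
    # context manager commit on success, and converts unique-key violations
    # into DatabaseUniqueViolation.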
def execute(self, query, variables):
def execute_with_cursor(cursor):
cursor.execute(query, variables)
def execute_with_error_convert():
self.with_cursor(execute_with_cursor)
self.with_error_convert(execute_with_error_convert)
def fetch(self, query, variables):
def fetch_with_cursor(cursor):
cursor.execute(query, variables)
return cursor.fetchall()
def fetch_with_error_convert():
return self.with_cursor(fetch_with_cursor)
return self.with_error_convert(fetch_with_error_convert)
def with_cursor(self, func):
with psycopg2.connect(os.environ.get('DATABASE_URL')) as connection:
with connection.cursor() as cursor:
return func(cursor)
def with_error_convert(self, func):
try:
return func()
except psycopg2.Error as e:
if isinstance(e, psycopg2.errors.UniqueViolation):
raise DatabaseUniqueViolation()
else:
raise e
```
#### File: goodtiming/modules/erase.py
```python
import re
from goodtiming.core.request import Request
from goodtiming.core.response import Response
import goodtiming.core.database
import goodtiming.util.stringutil
class EraseModule:
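    # Wires up the "erase" feature: the parsers turn user messages into ERASE
    # requests, the processor applies them against the todo table, and the
    # renderers turn the resulting responses back into reply text.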
def parsers(self):
return [BareDoneParser(), PlanDoneParser(), BareCancelParser(), PlanCancelParser()]
def processors(self):
return [EraseProcessor()]
def renderers(self):
return [PlanNotFoundRenderer(), TooManyPlansRenderer(), PlanErasedRenderer()]
class PlanDoneParser:
def parse(self, message):
match = re.match(_(r'^(the one about )?(?P<plan_pattern>.+) is done$'), message, re.IGNORECASE)
if not match:
return None
return Request('ERASE', {
'action': 'DONE',
'plan_pattern': match.group('plan_pattern'),
})
class BareDoneParser:
def parse(self, message):
match = re.match(_(r"^(it's )?done$"), message, re.IGNORECASE)
if not match:
return None
return Request('ERASE', {
'action': 'DONE',
'plan_pattern': None,
})
class PlanCancelParser:
def parse(self, message):
match = re.match(_(r'^cancel (the one about )?(?P<plan_pattern>.+)$'), message, re.IGNORECASE)
if not match:
return None
return Request('ERASE', {
'action': 'CANCEL',
'plan_pattern': match.group('plan_pattern'),
})
class BareCancelParser:
def parse(self, message):
match = re.match(_(r'^cancel it$'), message, re.IGNORECASE)
if not match:
return None
return Request('ERASE', {
'action': 'CANCEL',
'plan_pattern': None,
})
class EraseProcessor:
def __init__(self):
self.database = goodtiming.core.database.Database()
def process(self, request, doer_id):
if request.kind != 'ERASE':
return None
plan_pattern = request.arguments['plan_pattern']
current_timing = self._get_current_timing(doer_id)
matched_plans = self._get_matched_plans(doer_id, current_timing, plan_pattern)
if len(matched_plans) == 0:
return Response('PLAN-NOT-FOUND', {
'plan_pattern': plan_pattern,
})
elif len(matched_plans) > 1:
return Response('TOO-MANY-PLANS', {
'matched_plans': matched_plans,
})
matched_plan = matched_plans[0]
self._delete_todo(doer_id, current_timing, matched_plan)
return Response('PLAN-ERASE', {
'action': request.arguments['action'],
'matched_plan': matched_plan,
})
def _get_current_timing(self, doer_id):
rows = self.database.fetch('SELECT current_timing FROM doer WHERE doer_id = %s', (doer_id,))
return rows[0][0]
def _get_matched_plans(self, doer_id, current_timing, plan_pattern):
if plan_pattern is None:
rows = self.database.fetch('SELECT plan FROM todo WHERE doer_id = %s AND timing LIKE %s', (doer_id, '%{}%'.format(current_timing)))
else:
rows = self.database.fetch('SELECT plan FROM todo WHERE doer_id = %s AND timing LIKE %s AND plan LIKE %s', (doer_id, '%{}%'.format(current_timing), '%{}%'.format(plan_pattern)))
return [row[0] for row in rows]
def _delete_todo(self, doer_id, current_timing, matched_plan):
self.database.execute('DELETE FROM todo WHERE doer_id = %s AND timing LIKE %s AND plan = %s', (doer_id, '%{}%'.format(current_timing), matched_plan))
class PlanNotFoundRenderer:
def render(self, response):
if response.kind != 'PLAN-NOT-FOUND':
return None
plan_pattern = response.arguments['plan_pattern']
if plan_pattern is None:
return _("I couldn't find anything.").format()
else:
return _("I couldn't find anything about {plan_pattern}.").format(plan_pattern=response.arguments['plan_pattern'])
class TooManyPlansRenderer:
def render(self, response):
if response.kind != 'TOO-MANY-PLANS':
return None
matched_plans = response.arguments['matched_plans']
return _('There are multiple things I found:\n{plans}\nPlease specify only one at a time.').format(plans=goodtiming.util.stringutil.format_items(matched_plans))
class PlanErasedRenderer:
def render(self, response):
if response.kind != 'PLAN-ERASE':
return None
if response.arguments['action'] == 'DONE':
return _('Great! I already marked "{plan}" as done.').format(plan=response.arguments['matched_plan'])
if response.arguments['action'] == 'CANCEL':
return _('Okay, I already cancelled "{plan}".').format(plan=response.arguments['matched_plan'])
raise AssertionError('should not happen')
``` |
{
"source": "John-Lin/RuleEngine",
"score": 3
} |
#### File: malware/core/database.py
```python
import sqlite3
import logging
logger = logging.getLogger(__name__)
class SQLiteTool(object):
def __init__(self):
self.conn = sqlite3.connect('url_reports.db')
def creat_url_report(self):
try:
self.conn.execute('''CREATE TABLE URLREPORTS
(ID TEXT PRIMARY KEY NOT NULL,
URL TEXT NOT NULL,
POSITIVE INT);''')
# print "Table created successfully"
except sqlite3.OperationalError, e:
pass
# print e
def creat_domain_report(self):
try:
self.conn.execute('''CREATE TABLE DOMAINREPORTS
(ID TEXT PRIMARY KEY NOT NULL,
DOMAIN TEXT NOT NULL,
POSITIVE INT);''')
# print "Table created successfully"
except sqlite3.OperationalError, e:
pass
# print e
def __update_pos(self, key, pos):
self.conn.execute("UPDATE URLREPORTS set POSITIVE = %d where ID = '%s'" % (pos, key))
# print "Update Operation done successfully";
self.conn.commit()
def __update_url(self, key, url):
self.conn.execute("UPDATE URLREPORTS set URL = '%s' where ID = '%s'" % (url, key))
# print "Update Operation done successfully";
self.conn.commit()
# TODO
    # Update the URL from VirusTotal rather than from the pcap
# print "Total number of rows updated :", self.conn.total_changes
def __insert(self, key, url, pos):
try:
self.conn.execute('''INSERT INTO URLREPORTS (ID,URL,POSITIVE)
VALUES (?,?,?)''', (key, url, pos))
self.conn.commit()
except sqlite3.IntegrityError, e:
print '[ERROR]', e
def insert2(self, key, url, pos):
cursor = self.conn.execute("SELECT ID from URLREPORTS WHERE ID = '%s'" % key)
try:
key_in_table = cursor.next()[0]
except StopIteration:
key_in_table = None
if key_in_table and key_in_table == key:
logger.info("[SQL] Update it")
# if the URL in the table update it
self.__update_pos(key, pos)
self.__update_url(key, url)
else:
logger.info("[SQL] Insert the table")
# else insert the table
self.__insert(key, url, pos)
def show_positive(self, key):
cursor = self.conn.execute("SELECT POSITIVE from URLREPORTS WHERE ID = '%s'" % key)
try:
pos = int(cursor.next()[0])
except StopIteration:
pos = None
return pos
def show_url(self, key):
cursor = self.conn.execute("SELECT URL from URLREPORTS WHERE ID = '%s'" % key)
try:
url = str(cursor.next()[0])
except StopIteration:
url = None
return url
def is_key(self, key):
cursor = self.conn.execute("SELECT URL from URLREPORTS WHERE ID = '%s'" % key)
try:
key_in_table = cursor.next()[0]
key_in_table = True
except StopIteration:
key_in_table = False
return key_in_table
def show(self):
cursor = self.conn.execute("SELECT ID, URL, POSITIVE from URLREPORTS")
for row in cursor:
print "ID = ", row[0]
print "URL = ", row[1]
print "POSITIVE = ", row[2]
print "=== END ==="
def __del__(self):
self.conn.close()
def test():
import hashlib
url = 'www.google.com'
key = hashlib.sha1(url).hexdigest()
print "The key is: ", key
sql = SQLiteTool()
    sql.creat_url_report()
sql.insert2(key, url, 100)
print "=== NOW TABLE ==="
sql.show()
print "=== SHOW POSITIVE ==="
print sql.show_positive(key)
print "=== SHOW URL ==="
print sql.show_url(key)
if __name__ == '__main__':
import hashlib
url = 'www.baidu.com/s?wd=m16M-MM-bM-9M-R'
key = hashlib.sha1(url).hexdigest()
print "The key is: ", key
sql = SQLiteTool()
# print "=== TABLE ==="
# sql.show()
print sql.show_url(key)
```
#### File: malware/core/decoder.py
```python
import dpkt
import socket
import binascii
def hexify(x):
h = binascii.hexlify(x)
tohex = " ".join(h[i:i+2] for i in range(0, len(h), 2))
return tohex
def truncate_dns(x):
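    # hexify() renders each byte as three characters ("xx "), so this drops the
    # 12-byte DNS header (36 chars) and, for a typical single-question query,
    # the trailing 4-byte QTYPE/QCLASS (12 chars), leaving the question name hex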
return x[36:-12]
def _udp_iterator(pc):
for ts, pkt in pc:
try:
eth = dpkt.ethernet.Ethernet(pkt)
except dpkt.dpkt.NeedData:
continue
if eth.type == dpkt.ethernet.ETH_TYPE_IP:
ip = eth.data
if ip.p == dpkt.ip.IP_PROTO_UDP:
udp = ip.data
yield (ip.src, udp.sport, ip.dst, udp.dport, udp.data)
else:
pass
# Not UDP packets
else:
pass
# Not ether packets
return
def _tcp_iterator(pc):
for ts, pkt in pc:
try:
eth = dpkt.ethernet.Ethernet(pkt)
except dpkt.dpkt.NeedData:
continue
if eth.type == dpkt.ethernet.ETH_TYPE_IP:
ip = eth.data
if ip.p == dpkt.ip.IP_PROTO_TCP:
tcp = ip.data
yield (ip.src, tcp.sport, ip.dst, tcp.dport, tcp.data)
else:
pass
# Not TCP packets
else:
pass
# Not ether packets
return
def decode_dns_qd_name(pcap_path):
qd_name_list = []
five_tuple = []
conn = {}
fp = open(pcap_path)
pc = dpkt.pcap.Reader(fp)
unknown_opcode_counter = 0
for (src, sport, dst, dport, data) in _udp_iterator(pc):
if dport == 53:
key = (src, sport, dst, dport)
# UDP/53 is a DNS query
try:
dns = dpkt.dns.DNS(data)
conn[key] = [dns.qd[0].name, truncate_dns(hexify(data))]
except (dpkt.dpkt.UnpackError, IndexError):
unknown_opcode_counter += 1
# An unknown opcode maybe malicious traffic
# print unknown_opcode_counter
key = (src, sport, dst, dport, unknown_opcode_counter)
# print 'UNKNOWN_DNS_DATA:', hexify(data)
conn[key] = ['UNKNOWN_DNS', truncate_dns(hexify(data))]
# qd_name_list.append(dns.qd[0].name)
# five_tuple.append((src, sport, dst, dport))
# print truncate_dns(hexify(data))
# print "Query for", repr(dns.qd[0].name)
fp.close()
return conn
def decode_http_req_header(pcap_path):
    host_list = []
    uri_list = []
    five_tuple = []
    user_agent_list = []
    conn = {}  # results keyed by (src, sport, dst, dport)
fp = open(pcap_path)
pc = dpkt.pcap.Reader(fp)
for (src, sport, dst, dport, data) in _tcp_iterator(pc):
if dport == 80 and len(data) > 0:
key = (src, sport, dst, dport)
http_req = dpkt.http.Request(data)
# host_list.append(http_req.headers['host'])
# uri_list.append(http_req.uri)
# user_agent_list.append(http_req.headers['user-agent'])
# five_tuple.append((src, sport, dst, dport))
conn[key] = [http_req.headers['host'],
http_req.uri,
http_req.headers['user-agent']]
# print http_req.headers.keys()
# print "URI is ", http_req.uri
# for header in http_req.headers.keys() :
# pass
# print header, http_req.headers[header]
# print "method is ", http_req.method
# print "HTTP headers, packed ", http_req.pack()
# print "HTTP version", http_req.version
# print "HTTP data ", http_req.data
fp.close()
return conn
if __name__ == '__main__':
conn = decode_dns_qd_name('./2a.pcap')
print len(conn)
conn2 = decode_http_req_header('./2a.pcap')
print conn2
``` |
{
"source": "John-Lin/snort-socket",
"score": 3
} |
#### File: snort-socket/snortunsock/snort_listener.py
```python
import os
import socket
import logging
from . import alert
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
BUFSIZE = alert.AlertPkt._ALERTPKT_SIZE
def start_recv(sockfile=None):
'''Open a server on Unix Domain Socket'''
if sockfile is not None:
SOCKFILE = sockfile
else:
# default sockfile
SOCKFILE = "/tmp/snort_alert"
if os.path.exists(SOCKFILE):
os.unlink(SOCKFILE)
unsock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
unsock.bind(SOCKFILE)
logging.warning('Unix socket start listening...')
while True:
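        # each datagram from snort's unixsock output is one fixed-size alert record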
data = unsock.recv(BUFSIZE)
parsed_msg = alert.AlertPkt.parser(data)
if parsed_msg:
yield parsed_msg
if __name__ == '__main__':
    # start_recv() is a generator, so iterate over it to actually receive alerts
    for msg in start_recv():
        logging.info(msg)
``` |
{
"source": "John-Lin/wallofsheep",
"score": 2
} |
#### File: firebase_app/sniffer/sniffer.py
```python
import dpkt
import pcap
import re
import socket
import urlparse
import binascii
import signal
import sys
from firebase import firebase
from pprint import pprint
import settings
from utils import add_colons_to_mac
APP = {80: 'HTTP', 23: 'TELNET', 21: 'FTP', 110: 'POP3'}
class Sniffer(object):
def __init__(self, *args, **kwargs):
self._firebase = firebase.FirebaseApplication(settings.FIREBASE_URL,
None)
# Status update
self._firebase.patch('/status', {"status": "ON"})
pattern = 'tcp and dst port 80 or dst port 21'
# pattern = 'tcp and dst port 80 or dst port 21 or dst port 110'
self.pc = pcap.pcap(kwargs['interface'])
self.pc.setfilter(pattern)
self.all_user_info = {}
self.devices_mac = {}
self.info_counter = 0
def _is_host(self, content):
regex = re.compile('Host: (.*)')
return content is not None and regex.search(content)
def _is_pwd(self, content):
        regex = re.compile('(.*)password=(.*)')
return content is not None and regex.search(content)
def _is_pwd_with_txt(self, content):
        regex = re.compile('(.*)txtPwd=(.*)')
return content is not None and regex.search(content)
def _pick_ftp_info(self, data, client, server, dport, eth_src):
self.devices_mac.setdefault(add_colons_to_mac(eth_src), {})
self.devices_mac[add_colons_to_mac(eth_src)]['client'] = client
self.devices_mac[add_colons_to_mac(eth_src)]['server'] = server
self.devices_mac[add_colons_to_mac(eth_src)]['app'] = APP.get(dport)
self.devices_mac[add_colons_to_mac(eth_src)]['mac'] = (
add_colons_to_mac(eth_src))
if data.get('USER'):
self.devices_mac[add_colons_to_mac(eth_src)].update(
{'login': data.get('USER')})
if data.get('PASS'):
self.devices_mac[add_colons_to_mac(eth_src)].update(
{'password': data.get('PASS')})
device_info = self.devices_mac[add_colons_to_mac(eth_src)]
        if 'login' in device_info and 'password' in device_info:
print "FTP New Password get:"
pprint(self.devices_mac[add_colons_to_mac(eth_src)])
self._firebase.post('/pwd_table',
self.devices_mac[add_colons_to_mac(eth_src)])
# When push to firebase delete it
del self.devices_mac[add_colons_to_mac(eth_src)]
def _pick_http_info(self, data, client, server, dport, eth_src):
self.info_counter += 1
self.all_user_info[self.info_counter] = (
{'client': client, 'server': server,
'app': APP.get(dport),
'mac': add_colons_to_mac(binascii.hexlify(eth_src))}
)
if data.get('account'):
self.all_user_info[self.info_counter].update(
{'login': data.get('account')[0]})
elif data.get('username'):
self.all_user_info[self.info_counter].update(
{'login': data.get('username')[0]})
elif data.get('identification'):
self.all_user_info[self.info_counter].update({
'login': data.get('identification')[0]})
elif data.get('id'):
self.all_user_info[self.info_counter].update(
{'login': data.get('id')[0]})
elif data.get('os_username'):
self.all_user_info[self.info_counter].update(
{'login': data.get('os_username')[0]})
elif data.get('txtAccount'):
self.all_user_info[self.info_counter].update(
{'login': data.get('txtAccount')[0]})
elif data.get('email'):
self.all_user_info[self.info_counter].update(
{'login': data.get('email')[0]})
else:
self.all_user_info[self.info_counter].update({'login': None})
if data.get('password'):
self.all_user_info[self.info_counter].update(
{'password': data.get('password')[0]})
elif data.get('os_password'):
self.all_user_info[self.info_counter].update(
{'password': data.get('os_password')[0]})
elif data.get('txtPwd'):
self.all_user_info[self.info_counter].update(
{'password': data.get('txtPwd')[0]})
else:
self.all_user_info[self.info_counter].update({'password': None})
print "HTTP New Password get:"
pprint(self.all_user_info[self.info_counter])
self._firebase.post('/pwd_table', self.all_user_info[self.info_counter])
def _get_ftp_pop_payload(self, eth_pkt, ip_pkt, tcp_pkt):
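        # FTP and POP3 send credentials in clear text as "USER <name>" / "PASS <password>" lines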
if 'USER' in tcp_pkt.data:
regex = re.compile('USER (.*)')
user_obj = regex.search(tcp_pkt.data)
user_d = {'USER': user_obj.group(1).rstrip('\r')}
self._pick_ftp_info(user_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
elif 'PASS' in tcp_pkt.data:
regex = re.compile('PASS (.*)')
password_obj = regex.search(tcp_pkt.data)
password_d = {'PASS': password_obj.group(1).rstrip('\r')}
self._pick_ftp_info(password_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
elif 'user' in tcp_pkt.data:
regex = re.compile('user (.*)')
user_obj = regex.search(tcp_pkt.data)
user_d = {'USER': user_obj.group(1).rstrip('\r')}
self._pick_ftp_info(user_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
elif 'pass' in tcp_pkt.data:
regex = re.compile('pass (.*)')
password_obj = regex.search(tcp_pkt.data)
password_d = {'PASS': password_obj.group(1).rstrip('\r')}
self._pick_ftp_info(password_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst), tcp_pkt.dport,
binascii.hexlify(eth_pkt.src))
else:
return
def _get_http_payload(self, eth_pkt, ip_pkt, tcp_pkt):
try:
http_req = dpkt.http.Request(tcp_pkt.data)
if http_req.method == 'POST':
# This is POST method
pass
except dpkt.dpkt.UnpackError:
pass
if 'POST' in tcp_pkt.data:
# print 'POST', tcp.data
if 'password=' in tcp_pkt.data:
# print 'In POST packet password', tcp.data
pwd_obj = self._is_pwd(tcp_pkt.data)
if pwd_obj:
# print 'query string found:', pwd_obj.group(0)
qs_d = urlparse.parse_qs(pwd_obj.group(0))
# print qs_d
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
elif 'password=' in tcp_pkt.data:
# print 'password', tcp.data
qs_d = urlparse.parse_qs(tcp_pkt.data)
# print qs_d
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
elif 'txtPwd=' in tcp_pkt.data:
qs_d = urlparse.parse_qs(tcp_pkt.data)
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
elif 'email=' in tcp_pkt.data:
qs_d = urlparse.parse_qs(tcp_pkt.data)
self._pick_http_info(qs_d, socket.inet_ntoa(ip_pkt.src),
socket.inet_ntoa(ip_pkt.dst),
tcp_pkt.dport, eth_pkt.src)
else:
return
# Moocs dst IP 192.168.3.11
# Kits dst IP 192.168.3.11
# iLMS dst IP 172.16.58.3
def loop(self):
while True:
result = self._firebase.get('/status', None)
if result.get('status') == 'ON':
try:
for ts, buf in self.pc:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
tcp = ip.data
if len(tcp.data) > 0:
# print 'Packet in dst port number', tcp.dport
# make sure the pattern is correct
if tcp.dport == 80:
self._get_http_payload(eth, ip, tcp)
elif tcp.dport == 21 or tcp.dport == 110:
self._get_ftp_pop_payload(eth, ip, tcp)
else:
pass
except KeyboardInterrupt:
nrecv, ndrop, nifdrop = self.pc.stats()
print '\n%d packets received by filter' % nrecv
print '%d packets dropped by kernel' % ndrop
break
except (NameError, TypeError):
# print "No packet"
continue
else:
signal.signal(signal.SIGINT, lambda s, f: sys.exit(0))
print "I can not see packets."
continue
def __del__(self):
# Status update
self._firebase.patch('/status', {"status": "OFF"})
if __name__ == "__main__":
s = Sniffer(interface='eth2')
print '%s is listening on' % s.pc.name
s.loop()
``` |
{
"source": "johnlito123/electrum-xuez",
"score": 2
} |
#### File: p4a/xevan_hash/__init__.py
```python
from pythonforandroid.recipe import CythonRecipe
class XevanHashRecipe(CythonRecipe):
url = 'https://files.pythonhosted.org/packages/e3/a9/6e74bab6b3bc97e5f68f8c1fbc9a4e4ad1617ba167fa004511b1f9288c64/xevan_hash-0.2.3.tar.gz'
md5sum = 'ff2b5a650fbcd718109137da372c99aa'
version = '0.2.3'
depends = ['python3crystax']
def should_build(self, arch):
"""It's faster to build than check"""
return True
recipe = XevanHashRecipe()
``` |
{
"source": "johnlito123/Xuez_AI",
"score": 2
} |
#### File: xuex-ai/config/base.py
```python
from abc import ABCMeta
from programy.utils.logging.ylogger import YLogger
from programy.utils.substitutions.substitues import Substitutions
class BaseConfigurationData(object):
__metaclass__ = ABCMeta
def __init__(self, name):
assert (name is not None)
self._section_name = name
self._additionals = {}
def exists(self, name):
assert (name is not None)
return bool(name in self._additionals)
def value(self, key):
assert (key is not None)
if key in self._additionals:
return self._additionals[key]
else:
YLogger.warning(self, "Configuration key [%s] does not exist", key)
return None
@property
def section_name(self):
return self._section_name
@property
def id(self):
return self._section_name
def _get_file_option(self, config_file, option_name, section, bot_root, subs: Substitutions=None):
assert (config_file is not None)
assert (option_name is not None)
option = config_file.get_option(section, option_name, subs=subs)
if option is not None:
            option = self.sub_bot_root(option, bot_root)
return option
def sub_bot_root(self, text, root):
assert text is not None
assert root is not None
return text.replace('$BOT_ROOT', root)
def additionals_to_add(self):
return []
def load_additional_key_values(self, file_config, service, subs: Substitutions = None):
if file_config:
if service:
for key in file_config.get_keys(service):
if key in self.additionals_to_add():
value = file_config.get_option(service, key, subs=subs)
self._additionals[key] = value
def _extract_license_key(self, attr, license_keys):
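        # values written as "LICENSE:<key name>" are looked up in the loaded license keys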
if attr is not None:
if "LICENSE:" in attr:
if license_keys.has_key(attr[8:]):
return license_keys.get_key(attr[8:])
return attr
def check_for_license_keys(self, license_keys):
return
def config_to_yaml(self, data, config, defaults=True):
assert (data is not None)
assert (config is not None)
data[config.id] = {}
config.to_yaml(data[config.id], defaults)
def to_yaml(self, data, defaults=True):
pass
```
#### File: config/brain/oobs.py
```python
from programy.utils.logging.ylogger import YLogger
from programy.config.section import BaseSectionConfigurationData
from programy.config.brain.oob import BrainOOBConfiguration
from programy.utils.substitutions.substitues import Substitutions
class BrainOOBSConfiguration(BaseSectionConfigurationData):
def __init__(self):
BaseSectionConfigurationData.__init__(self, "oob")
self._default = None
self._oobs = {}
def exists(self, name):
if name == 'default':
return bool(self._default is not None)
return bool(name in self._oobs)
def default(self):
return self._default
def oob(self, name):
if name in self._oobs:
return self._oobs[name]
return None
def oobs(self):
return self._oobs.keys()
def check_for_license_keys(self, license_keys):
BaseSectionConfigurationData.check_for_license_keys(self, license_keys)
def load_config_section(self, configuration_file, configuration, bot_root, subs: Substitutions = None):
oobs = configuration_file.get_section("oob", configuration)
if oobs is not None:
oob_keys = configuration_file.get_keys(oobs)
for name in oob_keys:
oob = BrainOOBConfiguration(name)
oob.load_config_section(configuration_file, oobs, bot_root, subs=subs)
if name == 'default':
self._default = oob
else:
self._oobs[name] = oob
else:
YLogger.warning(self, "Config section [oobs] missing from Brain, no oobs loaded")
def to_yaml(self, data, defaults=True):
if defaults is True:
data['default'] = {'classname': 'programy.oob.defaults.default.DefaultOutOfBandProcessor'}
data['alarm'] = {'classname': 'programy.oob.defaults.alarm.AlarmOutOfBandProcessor'}
data['camera'] = {'classname': 'programy.oob.defaults.camera.CameraOutOfBandProcessor'}
data['clear'] = {'classname': 'programy.oob.defaults.clear.ClearOutOfBandProcessor'}
data['dial'] = {'classname': 'programy.oob.defaults.dial.DialOutOfBandProcessor'}
data['dialog'] = {'classname': 'programy.oob.defaults.dialog.DialogOutOfBandProcessor'}
data['email'] = {'classname': 'programy.oob.defaults.email.EmailOutOfBandProcessor'}
data['geomap'] = {'classname': 'programy.oob.defaults.map.MapOutOfBandProcessor'}
data['schedule'] = {'classname': 'programy.oob.defaults.schedule.ScheduleOutOfBandProcessor'}
data['search'] = {'classname': 'programy.oob.defaults.search.SearchOutOfBandProcessor'}
data['sms'] = {'classname': 'programy.oob.defaults.sms.SMSOutOfBandProcessor'}
data['url'] = {'classname': 'programy.oob.defaults.url.URLOutOfBandProcessor'}
data['wifi'] = {'classname': 'programy.oob.defaults.wifi.WifiOutOfBandProcessor'}
else:
if self._default is not None:
self.config_to_yaml(data, self._default, defaults)
            for oob in self._oobs.values():
self.config_to_yaml(data, oob, defaults)
```
#### File: security/linking/accountlinker.py
```python
import string
import random
import datetime
"""
Workflow is
1. User is associated with an initial PRIMARY Client, e.g Facebook
2. User decides to link account and ask PY to initiate an action
3. PY asks them to login into primary account and ask for a LINK TOKEN
As part of the process they are asked to provide a SECRET they know
PY then provides a LINK TOKEN
User now has
PRIMARY ACCOUNT ID
PRIMARY ACCOUNT NAME
GIVEN TOKEN
GENERATED TOKEN
-> LINK PRIMARY ACCOUNT $USERID $ACCOUNTNAME $GIVENTOKEN
<- PRIMARY ACCOUNT LINKED $GENERATEDTOKEN
<-> PRIMARY ACCOUNT LINK FAILED $REASON
The link has an expiry time, circa 1 hour, after which it expires and new tokens will need to be requested
4. PY now tells them to log into the client they want to link
5. User logs into secondary account and asks to link this to primary account
6. PY Asks for
PRIMARY ACCOUNT ID
PRIMARY ACCOUNT NAME
GIVEN TOKEN
GENERATED TOKEN
-> LINK SECONDARY ACCOUNT $SECONDARY_USERID $SECONDARY_ACCOUNT_NAME $PRIMARY_USERID $PRIMARY_ACCOUNT_NAME $GIVEN_TOKEN $GENERATED_TOKEN
<- SECONDARY ACCOUNT LINKED
<- SECONDARY ACCOUNT LINK FAILED $REASON
7. PY Links accounts
"""
class BasicAccountLinkerService(object):
KEY_CHARS = string.ascii_uppercase + string.digits
TWENTY_FOUR_HOURS = 24 * 60 * 60
MAX_RETRIES = 3
def __init__(self, storage_engine):
self._storage_engine = storage_engine
def initialise(self, client):
pass
def link_user_to_client(self, userid, clientid):
assert (userid is not None)
assert (clientid is not None)
if self._storage_engine.user_store().exists(userid, clientid) is True:
return True
if self._storage_engine.user_store().add_user(userid, clientid) is not None:
return True
return False
def linked_accounts(self, userid):
assert (userid is not None)
return self._storage_engine.user_store().get_links(userid)
def unlink_user_from_client(self, userid, clientid):
assert (userid is not None)
assert (clientid is not None)
if self._storage_engine.user_store().remove_user(userid, clientid) is True:
if self._storage_engine.link_store().remove_link(userid) is True:
if self._storage_engine.linked_account_store().unlink_accounts(userid) is True:
return True
return False
def unlink_user_from_all_clients(self, userid):
assert (userid is not None)
if self._storage_engine.user_store().remove_user_from_all_clients(userid) is True:
if self._storage_engine.link_store().remove_link(userid) is True:
if self._storage_engine.linked_account_store().unlink_accounts(userid) is True:
return True
return False
def _generate_key(self, size=8):
return ''.join(random.choice(BasicAccountLinkerService.KEY_CHARS) for _ in range(size))
def _generate_expirary(self, lifetime):
return datetime.datetime.now() + datetime.timedelta(seconds=lifetime)
def generate_link(self, userid, provided_key, lifetime=TWENTY_FOUR_HOURS):
assert (userid is not None)
assert (provided_key is not None)
generated_key = self._generate_key()
expires = self._generate_expirary(lifetime)
if self._storage_engine.link_store().create_link(userid, provided_key, generated_key, expires) is not None:
return generated_key
return None
def _has_link_expired(self, link):
assert (link is not None)
now = datetime.datetime.now()
if now > link.expires:
return True
return False
def _expire_link(self, link):
assert (link is not None)
link.expired = True
self._storage_engine.link_store().update_link(link)
def _valid_link_keys(self, link, provided_key, generated_key, max_retries):
assert (link is not None)
if link.generated_key == generated_key:
if link.provided_key == provided_key:
if link.retry_count < max_retries:
return True
return False
def _inc_retry_count(self, link):
assert (link is not None)
link.retry_count += 1
self._storage_engine.link_store().update_link(link)
def link_accounts(self, userid, provided_key, generated_key, linked_userid, linked_client):
assert (userid is not None)
assert (provided_key is not None)
assert (generated_key is not None)
assert (linked_userid is not None)
assert (linked_client is not None)
link = self._storage_engine.link_store().get_link(userid)
if link is not None:
if link.expired is False:
if self._has_link_expired(link) is True:
self._expire_link(link)
elif self._valid_link_keys(link, provided_key, generated_key, BasicAccountLinkerService.MAX_RETRIES) is False:
self._inc_retry_count(link)
elif self._storage_engine.user_store().add_user(linked_userid, linked_client) is not None:
if self._storage_engine.linked_account_store().link_accounts(userid, linked_userid) is not None:
return True
return False
def reset_link(self, userid):
assert (userid is not None)
link = self._storage_engine.link_store().get_link(userid)
if link is not None:
link.retry_count = 0
self._storage_engine.link_store().update_link(link)
return True
return False
def primary_account(self, secondary_userid):
assert (secondary_userid is not None)
return self._storage_engine.linked_account_store().primary_account(secondary_userid)
``` |
{
"source": "johnliu0/ece250-testing-server",
"score": 3
} |
#### File: ece250-testing-server/app/main.py
```python
import os
from flask import (
current_app as flask_app,
render_template,
g,
url_for,
session)
from app import db, auth, projects, users
"""Initialize server."""
# directory for where temporary files will be placed
flask_app.config['UPLOAD_DIR'] = os.path.expanduser('~')
db.init()
@flask_app.route('/')
def index():
"""Homepage."""
return render_template('index.html')
"""/auth"""
flask_app.register_blueprint(auth.bp)
"""/projects"""
flask_app.register_blueprint(projects.bp)
"""/users"""
flask_app.register_blueprint(users.bp)
```
#### File: ece250-testing-server/app/users.py
```python
from flask import (
Blueprint,
current_app as flask_app)
from models import user
bp = Blueprint('users', __name__, url_prefix='/users')
@bp.route('/', methods=['POST'])
def create_user():
return 'facts'
@bp.route('/validate/<token>', methods=['GET'])
def validate_user(token):
print(token)
return 'validated!'
``` |
{
"source": "johnliu123/deid",
"score": 2
} |
#### File: deid/tests/test_dicom_utils.py
```python
import unittest
import tempfile
import shutil
import json
import os
from deid.utils import get_installdir
from deid.data import get_dataset
class TestDicomUtils(unittest.TestCase):
def setUp(self):
self.pwd = get_installdir()
self.deid = os.path.abspath("%s/../examples/deid/deid.dicom" %self.pwd)
self.dataset = get_dataset('dicom-cookies')
self.tmpdir = tempfile.mkdtemp()
print("\n######################START######################")
def tearDown(self):
shutil.rmtree(self.tmpdir)
print("\n######################END########################")
def test_get_files(self):
print("Test test_get_files")
print("Case 1: Test get files from dataset")
from deid.dicom import get_files
from deid.config import load_deid
found = 0
for dicom_file in get_files(self.dataset):
found += 1
expected = 7
self.assertEqual(found, expected)
print("Case 2: Ask for files from empty folder")
found = 0
for dicom_file in get_files(self.tmpdir):
found += 1
expected = 0
self.assertEqual(found, expected)
def test_get_files_as_list(self):
print("Test test_get_files_as_list")
print("Case 1: Test get files from dataset")
from deid.dicom import get_files
from deid.config import load_deid
dicom_files = list(get_files(self.dataset))
found = len(dicom_files)
expected = 7
self.assertEqual(found, expected)
print("Case 2: Ask for files from empty folder")
dicom_files = list(get_files(self.tmpdir))
found = len(dicom_files)
expected = 0
self.assertEqual(found, expected)
def test_parse_action(self):
print("Test test_parse_action")
from deid.dicom.utils import perform_action
dicom = get_dicom(self.dataset)
print("Case 1: Testing ADD action")
self.assertTrue("PatientIdentityRemoved" not in dicom)
ADD = {"action":"ADD",
"field":"PatientIdentityRemoved",
"value":"Yes"}
dicom = perform_action(dicom=dicom,action=ADD)
self.assertTrue("PatientIdentityRemoved" in dicom)
self.assertEqual(dicom.get("PatientIdentityRemoved"),"Yes")
print("Case 2: Testing REPLACE action with string")
REPLACE = { "action":"REPLACE",
"field":"PatientIdentityRemoved",
"value":"No"}
dicom = perform_action(dicom=dicom,action=REPLACE)
self.assertTrue("PatientIdentityRemoved" in dicom)
self.assertEqual(dicom.get("PatientIdentityRemoved"),"No")
print("Case 3: Testing REPLACE action with variable")
item = {"fish":"stick"}
REPLACE = { "action":"REPLACE",
"field":"PatientIdentityRemoved",
"value":"var:fish"}
dicom = perform_action(dicom=dicom,action=REPLACE,item=item)
self.assertEqual(dicom.get("PatientIdentityRemoved"),"stick")
print("Case 4: Testing REPLACE action with non-existing variable")
REPLACE = { "action":"REPLACE",
"field":"PatientIdentityRemoved",
"value":"var:gummybear"}
updated = perform_action(dicom=dicom,action=REPLACE,item=item)
self.assertEqual(updated,None)
print("Case 5: Testing REMOVE action")
REMOVE = { "action":"REMOVE",
"field":"PatientIdentityRemoved"}
dicom = perform_action(dicom=dicom,action=REMOVE)
self.assertTrue("PatientIdentityRemoved" not in dicom)
print("Case 6: Testing invalid action")
RUN = { "action":"RUN",
"field":"PatientIdentityRemoved"}
updated = perform_action(dicom=dicom,action=RUN)
self.assertEqual(updated,None)
def test_entity_timestamp(self):
from deid.dicom.utils import get_entity_timestamp
print("Test test_entity_timestamp")
print("Case 1: field is empty returns None")
dicom = get_dicom(self.dataset)
ts = get_entity_timestamp(dicom)
self.assertEqual(ts,None)
print("Case 2: field not empty")
dicom.PatientBirthDate = "8/12/1962"
ts = get_entity_timestamp(dicom)
self.assertEqual(ts,'1962-08-12T00:00:00Z')
def test_item_timestamp(self):
from deid.dicom.utils import get_item_timestamp
print("Test test_item_timestamp")
print("Case 1: field is empty returns None")
dicom = get_dicom(self.dataset)
ts = get_item_timestamp(dicom)
self.assertEqual(ts,None)
print("Case 2: field not empty")
from deid.dicom.utils import perform_action
ADD = {"action":"ADD",
"field":"InstanceCreationDate",
"value":"1/1/2010"}
dicom = perform_action(action=ADD,dicom=dicom)
ts = get_item_timestamp(dicom)
self.assertEqual(ts,'2010-01-01T00:00:00Z')
def get_dicom(dataset):
'''helper function to load a dicom
'''
from deid.dicom import get_files
from pydicom import read_file
dicom_files = get_files(dataset)
return read_file(next(dicom_files))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "johnliu4/traffic-cv",
"score": 3
} |
#### File: johnliu4/traffic-cv/ai.py
```python
import sys
import config
import convnet
import region
import matplotlib.pyplot as plt
import numpy as np
import classifier
import matplotlib.patches as patches
from matplotlib.widgets import Button
from keras.preprocessing import image
from skimage import transform
from os import walk
from os.path import join
from random import randint
from joblib import load as load_weights
from time import time_ns
from PIL import Image as PILImage
def train():
classifier.train()
def predict(img_path):
classifier.load()
raw_img = image.load_img(img_path)
# scale image to desired size
resized_img = resize_image(raw_img, height=config.input_image_target_height)
# propose bounding box regions for image
img = image.img_to_array(resized_img)
boxes = region.propose_boxes(img)
detected_boxes = []
    fig, (f1, f2) = plt.subplots(1, 2)  # plt.subplots returns (figure, axes); name the figure handle accordingly
f1.imshow(resized_img)
f1.set_xlabel('Proposed regions')
f1.set_yticklabels([])
f1.set_xticklabels([])
f2.imshow(resized_img)
f2.set_xlabel('Predicted')
f2.set_yticklabels([])
f2.set_xticklabels([])
print('Extracting features from regions and classifying. ', end='')
timer = time_ns()
counter = 0
for idx, box in enumerate(boxes):
box_img = img[box.min_y : box.max_y, box.min_x : box.max_x]
box_img = transform.resize(box_img, config.convnet_image_input_size + (3,))
f1.add_patch(patches.Rectangle(
(box.min_x, box.min_y),
box.width,
box.height,
linewidth=1,
color='red',
fill=False
))
box_img_reshaped = box_img.reshape((1,) + box_img.shape)
convnet_output = convnet.extract_features(box_img_reshaped)
convnet_output = convnet_output.reshape((1,) + convnet_output.shape)
svm_output = classifier.predict(convnet_output)
if svm_output == 1:
counter += 1
f2.add_patch(patches.Rectangle(
(box.min_x, box.min_y),
box.width,
box.height,
linewidth=1,
color='green',
fill=False
))
print(f'Done in {(time_ns() - timer) / 1000000000}s.')
print(counter, '/', len(boxes))
plt.show()
yes_counter = 0
no_counter = 0
def button_yes(svm_output, img):
global yes_counter
print(f'SVM output: {svm_output}, Actual: 1')
existing_files = set()
for (_, _, file_names) in walk(config.training_positives_dir):
for file in file_names:
existing_files.add(file)
# svm failed to detect positive sample
if svm_output == 0:
file_name = f'img_{yes_counter}.jpg'
yes_counter += 1
while file_name in existing_files:
file_name = f'img_{yes_counter}.jpg'
yes_counter += 1
img.save(join(config.training_positives_dir, file_name))
plt.close()
def button_no(svm_output, img):
global no_counter
print(f'SVM output: {svm_output}, Actual: 0')
existing_files = set()
for (_, _, file_names) in walk(config.training_negatives_dir):
for file in file_names:
existing_files.add(file)
# svm failed to detect negative sample
if svm_output == 1:
file_name = f'img_{no_counter}.jpg'
no_counter += 1
while file_name in existing_files:
file_name = f'img_{no_counter}.jpg'
no_counter += 1
img.save(join(config.training_negatives_dir, file_name))
plt.close()
def mine(img_path, use_predicted=False):
"""Tool for making negative and positive training samples.
The bounding boxes used in the predict method are shown. Press yes or no as
appropriate to confirm whether or not a sample is positive or negative. If
the SVM was incorrect in its output, then a training sample will be made
in the training negatives and positives directory specified in the config.
Images are called img_x.jpg, where x is a non-negative integer. This tool
will not overwrite images, and will instead find the first non-negative
integer that it can save to without overwriting an existing file. This
allows the tool to seamlessly and easily add to the existing datasets.
Args:
img_path: Path to image to mine.
use_predicted: Whether or not to use the resulting bounding boxes from
after the predictions are made. This is useful for when all the
correct boxes have been made, but there are additional false
positives.
"""
classifier.load()
raw_img = image.load_img(img_path)
# scale image to desired size
resized_img = resize_image(raw_img, height=config.input_image_target_height)
img = image.img_to_array(resized_img)
boxes = region.propose_boxes(img)
shown_boxes = []
for idx, box in enumerate(boxes):
box_img = img[box.min_y : box.max_y, box.min_x : box.max_x]
pil_box_img = image.array_to_img(box_img)
box_img = transform.resize(box_img, config.convnet_image_input_size + (3,))
box_img_reshaped = box_img.reshape((1,) + box_img.shape)
convnet_output = convnet.extract_features(box_img_reshaped)
convnet_output = convnet_output.reshape((1,) + convnet_output.shape)
svm_output = classifier.predict(convnet_output)
if use_predicted:
if svm_output == 1:
shown_boxes.append((box, svm_output))
else:
shown_boxes.append((box, svm_output))
for idx, (box, svm_output) in enumerate(shown_boxes):
box_img = img[box.min_y : box.max_y, box.min_x : box.max_x]
pil_box_img = image.array_to_img(box_img)
plt.imshow(pil_box_img)
plt.axis('off')
axyes = plt.axes([0.7, 0.05, 0.1, 0.075])
axno = plt.axes([0.81, 0.05, 0.1, 0.075])
print(f'Box {idx + 1} of {len(shown_boxes)}: ', end='')
yes = Button(axyes, 'Yes')
yes.on_clicked(lambda _: button_yes(svm_output, pil_box_img))
no = Button(axno, 'No')
no.on_clicked(lambda _: button_no(svm_output, pil_box_img))
plt.show()
def resize_image(img, height):
"""Resizes a PIL image to a specified height while keeping the aspect ratio
the same.
"""
aspect_ratio = img.size[0] / img.size[1]
width = int(np.floor(height * aspect_ratio))
return img.resize((width, height))
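# Usage sketch: a minimal end-to-end flow for this module, assuming `config`,
# the convnet weights and a trained SVM are already in place. The image path
# below is hypothetical and only illustrates the intended call order.
#
#   train()                                       # fit the SVM on convnet features
#   predict('samples/intersection.jpg')           # proposed vs. predicted boxes
#   mine('samples/intersection.jpg', use_predicted=True)  # grow the training set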
```
#### File: johnliu4/traffic-cv/augment.py
```python
import sys
import numpy as np
import matplotlib.pyplot as plt
from keras.preprocessing import image
# data augmentation with Keras ImageDataGenerator
pos_data_gen = image.ImageDataGenerator(
rotation_range=5,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.05,
zoom_range=0.05,
horizontal_flip=True,
fill_mode='nearest',
rescale=1./255)
neg_data_gen = image.ImageDataGenerator(
rotation_range=20,
width_shift_range=0.20,
height_shift_range=0.20,
shear_range=0.20,
zoom_range=0.20,
horizontal_flip=True,
fill_mode='nearest',
rescale=1./255)
def generate_pos(
img_path,
target_size,
num_imgs=5):
"""Loads an image and generates random augmentations."""
img = image.load_img(img_path, target_size=target_size)
x = image.img_to_array(img)
x = x.reshape((1,) + x.shape)
augmentations = []
i = 0
for batch in pos_data_gen.flow(x, batch_size=1):
augmentations.append(batch[0])
i += 1
if i >= num_imgs:
break
return np.asarray(augmentations)
def generate_neg(
img_path,
target_size,
num_imgs=5):
"""Loads an image and generates random augmentations."""
img = image.load_img(img_path, target_size=target_size)
x = image.img_to_array(img)
x = x.reshape((1,) + x.shape)
augmentations = []
i = 0
for batch in neg_data_gen.flow(x, batch_size=1):
augmentations.append(batch[0])
i += 1
if i >= num_imgs:
break
return np.asarray(augmentations)
def show_augmentations(img_path, target_size):
"""Shows various augmentations of a particular image."""
img = image.load_img(img_path, target_size=target_size)
x = image.img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 1
fig, ax = plt.subplots(4, 5)
ax[0, 0].imshow(image.array_to_img(img))
ax[0, 0].axis('off')
    for batch in pos_data_gen.flow(x, batch_size=1):  # `data_gen` was undefined; the positive-sample generator is assumed here
ax[i // 5, i % 5].axis('off')
ax[i // 5, i % 5].imshow(image.array_to_img(batch[0]))
i += 1
if i >= 4 * 5:
break
plt.show()
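# Usage sketch: how the two generators above might be combined into a labelled
# training batch. The paths and target size are hypothetical.
#
#   pos = generate_pos('data/positives/img_0.jpg', target_size=(64, 64), num_imgs=10)
#   neg = generate_neg('data/negatives/img_0.jpg', target_size=(64, 64), num_imgs=10)
#   X = np.concatenate([pos, neg])
#   y = np.concatenate([np.ones(len(pos)), np.zeros(len(neg))])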
``` |
{
"source": "johnliu55tw/game-of-life",
"score": 3
} |
#### File: game-of-life/game_of_life/model.py
```python
from functools import wraps
class OutOfBoundError(Exception):
"""Exception for coordinate values out of size limit."""
pass
def check_boundary(f):
@wraps(f)
def wrapper(self, x, y, *args, **kwargs):
if x < 0 or x >= self._size[0] or y < 0 or y >= self._size[1]:
raise OutOfBoundError('{}, {}'.format(x, y))
else:
return f(self, x, y, *args, **kwargs)
return wrapper
class World(object):
def __init__(self, x, y):
if x <= 0:
raise ValueError('x must be larger than 0')
if y <= 0:
raise ValueError('y must be larger than 0')
self._size = (x, y)
self._alives = set()
self._corners = ((0, 0), (x-1, 0), (0, y-1), (x-1, y-1))
@property
def size(self):
return self._size
@property
def alives(self):
return tuple(self._alives)
@check_boundary
def set_alive(self, x, y):
self._alives.add((x, y))
@check_boundary
def set_dead(self, x, y):
if self.is_alive(x, y):
self._alives.remove((x, y))
@check_boundary
def is_alive(self, x, y):
return (x, y) in self._alives
@check_boundary
def toggle_aliveness(self, x, y):
if self.is_alive(x, y):
self.set_dead(x, y)
else:
self.set_alive(x, y)
def _calc_neighbors(self, x, y):
all_nbrs = ((x-1, y-1),
(x-1, y),
(x-1, y+1),
(x, y-1),
(x, y+1),
(x+1, y-1),
(x+1, y),
(x+1, y+1))
return tuple(
(x, y) for x, y in all_nbrs
if x >= 0 and x < self._size[0] and y >= 0 and y < self._size[1])
def _calc_aliveness(self, x, y):
neighbors = self._calc_neighbors(x, y)
alive_nbrs_count = len(tuple(nbr for nbr in neighbors
if self.is_alive(nbr[0], nbr[1])))
if self.is_alive(x, y):
if alive_nbrs_count in (0, 1):
return False
elif alive_nbrs_count in (2, 3):
return True
elif alive_nbrs_count >= 4:
return False
else:
if alive_nbrs_count == 3:
return True
else:
return False
def advance(self):
next_alives = set()
for x, y in self._alives:
nbrs = self._calc_neighbors(x, y)
for nbr in nbrs:
if self._calc_aliveness(nbr[0], nbr[1]):
next_alives.add(nbr)
self._alives = next_alives
class Pattern(object):
def __init__(self, name, alives):
self._name = name
self._alives = tuple(alives)
def __repr__(self):
return '<Pattern "{}">: {}'.format(self._name, repr(self._alives))
@property
def alives(self):
return tuple(self._alives)
@property
def name(self):
return self._name
def as_screen_coordinate(self, width, height):
if not self._alives:
return tuple()
min_width = 1 + 2 * max(abs(coor[0]) for coor in self._alives)
min_height = 1 + 2 * max(abs(coor[1]) for coor in self._alives)
if width < min_width or height < min_height:
raise ValueError('Size must be larger than width: {}, height: {}.'.format(
min_width, min_height))
result = tuple((c[0] + int(width / 2), -c[1] + int(height / 2))
for c in self._alives)
return result
Patterns = [
Pattern('Clear', []),
Pattern('Glider', [(1, 0), (0, 1), (-1, -1), (0, -1), (1, -1)]),
Pattern('Small Exploder', [(0, 0), (1, 0), (-1, 0), (0, 1), (-1, -1), (1, -1), (0, -2)]),
Pattern('Exploder', [(0, 2), (0, -2), (-2, 2), (-2, 1), (-2, 0), (-2, -1), (-2, -2),
(2, 2), (2, 1), (2, 0), (2, -1), (2, -2)])
]
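# Minimal sanity check: advancing a three-cell "blinker" one generation should
# rotate it from a vertical to a horizontal bar, which exercises both
# _calc_neighbors and _calc_aliveness.
if __name__ == '__main__':
    world = World(5, 5)
    for cell in ((1, 0), (1, 1), (1, 2)):
        world.set_alive(*cell)
    world.advance()
    # Expected: [(0, 1), (1, 1), (2, 1)]
    print(sorted(world.alives))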
```
#### File: game-of-life/game_of_life/presenter.py
```python
import logging
import tkinter
from .view import MainView
from .model import World, Patterns
logger = logging.getLogger(__name__)
class GameOfLifePresenter(object):
def __init__(self, width, height, min_delay):
self.root = tkinter.Tk()
self.main_view = MainView(width, height,
pattern_options=[p.name for p in Patterns],
master=self.root)
self.world = World(width, height)
default_pattern = Patterns[0]
for x, y in default_pattern.as_screen_coordinate(width, height):
self.world.set_alive(x, y)
self.size = (width, height)
self.min_delay = min_delay
self._timer_delay = None
# Initial speed. Don't change it or the speed will differ from the speed
# scroller!
self.set_speed(0.1)
self.stop()
# Must use bind_all to capture event
self.main_view.bind_all('<<Cell-Click>>', self.on_cell_click)
self.main_view.bind_all('<<StartStop-Toggle>>', self.on_startstop_toggle)
self.main_view.bind_all('<<Next-Click>>', self.on_next_click)
self.main_view.bind_all('<<Speed-Change>>', self.on_speed_change)
self.main_view.bind_all('<<PatternOption-Change>>', self.on_pattern_option_change)
@property
def is_running(self):
return self._is_running
def run(self):
self.main_view.update(alives=self.world.alives)
self.stop()
self.root.mainloop()
def start(self):
self._is_running = True
self.main_view.update(startstop_text='Stop')
self.root.after(self._timer_delay, self.on_timer)
def stop(self):
self._is_running = False
self.main_view.update(startstop_text='Start')
def set_speed(self, scale):
if scale <= 0 or scale > 1:
raise ValueError('Speed must be within 0 < scale <= 1')
new_delay = int(self.min_delay / scale)
logger.debug('Change delay to {}'.format(new_delay))
self._timer_delay = new_delay
def on_timer(self):
if self._is_running:
self.world.advance()
self.main_view.update(alives=self.world.alives)
self.root.after(self._timer_delay, self.on_timer)
def on_cell_click(self, event):
logger.debug('on_cell_click! X:{}, Y:{}'.format(event.x, event.y))
x, y = event.x, event.y
self.world.toggle_aliveness(x, y)
self.main_view.update(alives=self.world.alives)
def on_startstop_toggle(self, event):
logger.debug('StartStop Toggled!')
if self._is_running:
self.stop()
else:
self.start()
def on_next_click(self, event):
self.world.advance()
self.main_view.update(alives=self.world.alives)
def on_speed_change(self, event):
logger.debug('Speed change event: {}'.format(event.x))
self.set_speed(event.x/100)
def on_pattern_option_change(self, event):
logger.debug('Option Menu change, index: {}'.format(event.x))
pattern = Patterns[event.x]
self.world = World(self.size[0], self.size[1])
for alive_cell in pattern.as_screen_coordinate(self.size[0], self.size[1]):
self.world.set_alive(alive_cell[0], alive_cell[1])
self.main_view.update(alives=self.world.alives)
``` |
{
"source": "johnliu55tw/pelican-cjk-correct-spaces",
"score": 3
} |
#### File: johnliu55tw/pelican-cjk-correct-spaces/test_pelican_cjk.py
```python
import re
import unittest
from unittest import mock
import pelican_cjk
class RangesAsRegexTestCase(unittest.TestCase):
def test_ranges_with_start_and_end(self):
data = (
('a', 'z'),
('\u00A0', '\u00AF'),
)
re_range = pelican_cjk.ranges_as_regex(data)
self.assertEqual(re_range, '[a-z\u00A0-\u00AF]')
def test_ranges_without_end(self):
data = (
('A', ''),
('\u00A0\u00A1', ''),
('!@#$%', ''),
)
re_range = pelican_cjk.ranges_as_regex(data)
self.assertEqual(re_range, '[A\u00A0\u00A1!@#$%]')
def test_ranges_mixed(self):
data = (
('A', 'Z'),
('\u00A0', ''),
('!@#', ''),
('a', 'z'),
)
re_range = pelican_cjk.ranges_as_regex(data)
self.assertEqual(re_range, '[A-Z\u00A0!@#a-z]')
class CjkRangeTestCase(unittest.TestCase):
def is_cjk(self, c):
return re.match(pelican_cjk.ranges_as_regex(pelican_cjk.CJK_RANGES), c) is not None
def test_pattern_matched(self):
self.assertTrue(self.is_cjk('我'))
self.assertTrue(self.is_cjk('あ'))
self.assertTrue(self.is_cjk('マ'))
self.assertTrue(self.is_cjk('한'))
self.assertTrue(self.is_cjk('목'))
self.assertTrue(self.is_cjk('ㄱ'))
def test_pattern_not_matched(self):
self.assertFalse(self.is_cjk('a'))
self.assertFalse(self.is_cjk('Ā'))
self.assertFalse(self.is_cjk('ŝ'))
self.assertFalse(self.is_cjk('>'))
self.assertFalse(self.is_cjk('!'))
self.assertFalse(self.is_cjk('\n'))
class RemoveNewlineTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_newline_should_be_removed_cases(self):
test_cases = (
('你好\n好笑', '你好好笑'),
('逗號,\n後面', '逗號,後面'),
('(全形括號)\n後面', '(全形括號)後面'),
('句號。\nabc', '句號。abc'),
('括號)\nabc', '括號)abc'),
('abc\n,中文', 'abc,中文'),
('。\na\n,\nb', '。a,b'),
)
for data, answer in test_cases:
with self.subTest(data=data, answer=answer):
result = pelican_cjk.remove_paragraph_newline(data)
self.assertEqual(result, answer)
def test_newline_should_be_kept_cases(self):
test_cases = (
'英文abcd\n後面',
'<em>中文</em>\n後面',
'``literal``\n下一行',
'`link`_\n下一行',
'**emph**\n下一行',
'半形逗號,\n下一行',
'半形句號.\n下一行',
)
for data in test_cases:
with self.subTest(data=data):
result = pelican_cjk.remove_paragraph_newline(data)
self.assertEqual(result, data)
class AutoSpacingTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_should_add_space_cases(self):
test_cases = (
('哈哈ABC', '哈哈 ABC'),
('ABC中文', 'ABC 中文'),
('哈哈##@@嘻嘻', '哈哈 ##@@ 嘻嘻'),
('一2三4', '一 2 三 4'),
('1二3四', '1 二 3 四'),
('這是α', '這是 α'),
('這是£', '這是 £'),
('這是=', '這是 ='),
)
for data, answer in test_cases:
with self.subTest(data=data, answer=answer):
result = pelican_cjk.auto_spacing(data)
self.assertEqual(result, answer)
def test_with_tag_should_add_space_cases(self):
test_cases = (
('A<em>啥</em>B', 'A <em>啥</em> B'),
('中<em>A</em>文', '中 <em>A</em> 文'),
('哈哈<em>ABC</em>', '哈哈 <em>ABC</em>'),
('ABC<em>哈哈</em>', 'ABC <em>哈哈</em>'),
('<strong>ABC</strong>中文', '<strong>ABC</strong> 中文'),
('<strong>中文</strong>ABC', '<strong>中文</strong> ABC'),
('一<em>2</em>三<em>4</em>', '一 <em>2</em> 三 <em>4</em>'),
('<em>1</em>二<em>3</em>四', '<em>1</em> 二 <em>3</em> 四'),
('ABC<a href=http://a.b.c>連結</a>CBA', 'ABC <a href=http://a.b.c>連結</a> CBA'),
('<em>A</em>NotCJK<em>中文</em>', '<em>A</em>NotCJK <em>中文</em>'),
('<em>中</em>是中文<strong>A</strong>', '<em>中</em>是中文 <strong>A</strong>'),
)
for data, answer in test_cases:
with self.subTest(data=data):
result = pelican_cjk.auto_spacing(data)
self.assertEqual(result, answer)
def test_should_not_change_cases(self):
test_cases = (
'abcd α£ 1234',
'哈<some_tag/>啥',
'中文<p>啥</p>中文',
'中文 <p>啥</p> 中文',
'abc <em>def</em> ghi',
'五<六>七',
'這&還在',
'abc。123',
'123,abc',
)
for data in test_cases:
with self.subTest(data=data):
result = pelican_cjk.auto_spacing(data)
self.assertEqual(result, data)
def test_with_tag_should_not_change_cases(self):
test_cases = (
'這是 <strong>中文</strong> 好嗎',
'ABC <em>ABC</em> ABC',
'Nested<em><strong>行內</strong></em>Markup',
'<em>行</em><strong>inside</strong',
)
for data in test_cases:
with self.subTest(data=data):
result = pelican_cjk.auto_spacing(data)
self.assertEqual(result, data)
class RemoveMarkupSpacingTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_should_remove_space_cases(self):
test_cases = (
('中文 <em>中文</em>', '中文<em>中文</em>'),
('<em>中文</em> 中文', '<em>中文</em>中文'),
('你 <em>好</em> 嗎 <strong>嗨</strong> 嗨',
'你<em>好</em>嗎<strong>嗨</strong>嗨'),
)
for data, answer in test_cases:
with self.subTest(data=data):
result = pelican_cjk.remove_markup_spacing(data)
self.assertEqual(result, answer)
def test_should_not_change_cases(self):
test_cases = (
'中文 中文 中文',
'ABC <em>中文</em>',
'<em>中文</em> ABC',
'中文 <em>ABC</em>',
'<em>中文</em> ABC',
            '<strong><em>中文</strong></em> 中文',  # trailing commas were missing, which silently concatenated these two cases
            '中文 <strong><em>中文</strong></em>',
)
for data in test_cases:
with self.subTest(data=data):
result = pelican_cjk.remove_markup_spacing(data)
self.assertEqual(result, data)
@mock.patch('pelican_cjk.remove_markup_spacing')
@mock.patch('pelican_cjk.remove_paragraph_newline')
@mock.patch('pelican_cjk.auto_spacing')
class MainTestCase(unittest.TestCase):
def setUp(self):
self.mock_content = mock.MagicMock()
self.mock_content._content = 'SomeTextThatWillRemainIntact'
self.mock_content.settings = {'someKey': 'someValue'}
def tearDown(self):
pass
def test_with_default_settings(
self,
m_auto_spacing,
m_remove_newline,
m_remove_markup_spacing):
pelican_cjk.main(self.mock_content)
m_remove_newline.assert_called_with('SomeTextThatWillRemainIntact')
m_auto_spacing.assert_called_with(m_remove_newline.return_value)
m_remove_markup_spacing.assert_called_with(m_auto_spacing.return_value)
def test_with_CJK_REMOVE_PARAGRAPH_NEWLINE_disabled(
self,
m_auto_spacing,
m_remove_newline,
m_remove_markup_spacing):
self.mock_content.settings.update({'CJK_REMOVE_PARAGRAPH_NEWLINE': False})
pelican_cjk.main(self.mock_content)
m_remove_newline.assert_not_called()
m_auto_spacing.assert_called_with('SomeTextThatWillRemainIntact')
m_remove_markup_spacing.assert_called_with(m_auto_spacing.return_value)
def test_with_CJK_AUTO_SPACING_disabled(
self,
m_auto_spacing,
m_remove_newline,
m_remove_markup_spacing):
self.mock_content.settings.update({'CJK_AUTO_SPACING': False})
pelican_cjk.main(self.mock_content)
m_remove_newline.assert_called_with('SomeTextThatWillRemainIntact')
m_auto_spacing.assert_not_called()
m_remove_markup_spacing.assert_called_with(m_remove_newline.return_value)
def test_with_REMOVE_CJK_MAKRUP_SPACING_disabled(
self,
m_auto_spacing,
m_remove_newline,
m_remove_markup_spacing):
self.mock_content.settings.update({'CJK_REMOVE_MARKUP_SPACING': False})
pelican_cjk.main(self.mock_content)
m_remove_newline.assert_called_with('SomeTextThatWillRemainIntact')
m_auto_spacing.assert_called_with(m_remove_newline.return_value)
m_remove_markup_spacing.assert_not_called()
``` |
{
"source": "john-livingston/rossby-ridge",
"score": 2
} |
#### File: src/figures/ages.py
```python
import numpy as np
import pandas as pd
from astropy.table import Table
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams["figure.dpi"] = 100
mpl.rcParams["savefig.bbox"] = "tight"
mpl.rcParams["savefig.dpi"] = 300
import seaborn as sns
from scipy import interpolate
import astropy.constants as c
sun = {"teff": 5772,
"prot": 25.4,
"e_prot": 25.4-24.5,
"E_prot": 36-25.4
}
sun["logg"] = np.log10(c.GM_sun.cgs.value/c.R_sun.cgs.value**2)
######################################################################################
#McQuillan et al. 2013
mcq_koi = Table.read("https://cdsarc.cds.unistra.fr/ftp/J/ApJ/775/L11/table1.dat",
readme="https://cdsarc.cds.unistra.fr/ftp/J/ApJ/775/L11/ReadMe",
format="ascii.cds")
mcq_koi = mcq_koi.to_pandas()
mcq_koi = mcq_koi.add_prefix('mcq_')
#McQuillan et al. 2014
# mcq = Table.read('../data/mcquillan2014/table1.dat',
# readme='../data/mcquillan2014/ReadMe',
# format='ascii.cds')
# mcq = mcq.to_pandas()
# mcq = mcq.add_prefix('mcq_')
mcq = pd.read_parquet('../data/mcquillan2014_table1.parquet')
######################################################################################
######################################################################################
# California-Kepler Survey (Fulton & Petigura 2018)
# This data table has been augmented with data from other surveys (see David et al. 2021)
cks = pd.read_parquet('../data/cks_merged.parquet')
# The dataframe has a row entry for each KOI, meaning individual star are represented N times
# where N is the number of KOIs detected around that star so we drop duplicates.
cks = cks.drop_duplicates(subset=['kepid'], keep='first')
cks = cks.merge(mcq_koi, how='left', left_on='kepid', right_on='mcq_KIC')
######################################################################################
######################################################################################
# LAMOST-Kepler
lam = pd.read_csv('../data/kepler_lamost.csv')
print('LAMOST unique KIC targets:', len(np.unique(lam["KIC"])))
print('LAMOST unique DR2 targets:', len(np.unique(lam["DR2Name"])))
# Drop duplicate sources, keeping the one with the brighter G magnitude
lam = lam.sort_values(["KIC", "Gmag"], ascending = (True, True))
lam = lam.merge(mcq, how='left', left_on="KIC", right_on="mcq_KIC")
lam = lam.drop_duplicates(subset=['KIC'], keep='first')
lam_mask = (lam["Teff_lam"]>3000)
lam_mask &= (lam["Teff_lam"]<8000)  # use &= so the lower Teff cut is not discarded
lam_mask &= (lam["logg_lam"]>3)
lam_mask &= (lam["logg_lam"]<5)
lam_mask &= (abs(lam["feh_lam"])<2)
lam = lam[lam_mask]
print('LAMOST unique KIC targets:', len(np.unique(lam["KIC"])))
print('LAMOST unique DR2 targets:', len(np.unique(lam["DR2Name"])))
print('Median LAMOST Teff error:', np.median(lam["e_Teff_lam"]))
######################################################################################
############################################
cks_teff = cks["p20_cks_steff"]
cks_e_teff = cks["p20_cks_steff_err1"]
cks_prot = cks["d21_prot"]
def ridge_hi(teff):
m = (2-24)/(6500-5800)
b = (2 - m*6500)
return m*teff + b
def ridge_lo(teff):
m = (2-24)/(6500-5800)
b = (-5 - m*6500)
return m*teff + b
mask = (cks['p20_cks_slogg']>4.1) #main sequence
ridge = (cks['p20_cks_steff']>5850)
ridge &= (cks['p20_cks_steff']<6500)
ridge &= (cks['d21_prot']<ridge_hi(cks['p20_cks_steff']))
ridge &= (cks['d21_prot']>ridge_lo(cks['p20_cks_steff']))
ridge &= mask
############################################
######################################################################################
#bk = pd.read_csv("../data/_kim_2010/-kim-2010.csv")
def convective_turnover_timescale(teff,
ref='gunn1998'):
#Returns convective turnover timescale in days
if ref == 'gunn1998':
#Gunn et al. 1998 relation, from Cranmer & Saar 2011
return 314.24*np.exp( -(teff/1952.5) - (teff/6250.)**18. ) + 0.002
# elif ref == '2010':
# # & Kim 2010 relation for local tau_c
# teff_pts = 10.**bk['logT']
# tc_pts = bk['Local_tau_c']
# return np.interp(teff, teff_pts, tc_pts)
def constant_rossby(teff, ro):
#Return locus of rotation periods corresponding to constant Rossby number
return ro * convective_turnover_timescale(teff)
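# Rough sense of scale for the loci plotted below (values are approximate):
# at roughly solar temperature the Gunn et al. (1998) fit gives a convective
# turnover time of order 10 days, so Ro = 1.3 corresponds to ~17 d and
# Ro = 0.5 to ~6 d.
print('tau_c(5772 K) ~ {:.1f} d'.format(convective_turnover_timescale(5772.)))
print('P(Ro=1.3, 5772 K) ~ {:.1f} d'.format(constant_rossby(5772., 1.3)))
print('P(Ro=0.5, 5772 K) ~ {:.1f} d'.format(constant_rossby(5772., 0.5)))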
######################################################################################
#Models
std = pd.read_hdf('../data/models/standard_population.h5', key='sample')
std['ro'] = std['period']/(std['taucz']/86400)
std = std[std['evo']==1] # limit to main-sequence
roc = pd.read_hdf('../data/models/rocrit_population.h5', key='sample')
roc['ro'] = roc['period']/(roc['taucz']/86400)
roc = roc[roc['evo']==1] # limit to main-sequence
fig, ax = plt.subplots()
sns.kdeplot(
x=lam["Teff_lam"],
y=lam["Prot"],
fill=True,
bw_adjust=0.25,
levels=4,
#levels=[0.25,0.5,0.75,1],
ax=ax
)
ax.scatter(lam["Teff_lam"], lam["mcq_Prot"], s=0.1, c='orange', alpha=1, rasterized=True, label='LAMOST–McQuillan')
ax.set_xlim(5000,7000)
ax.set_ylim(0,30)
_x = np.linspace(5000,6250,100)
ax.plot(_x, constant_rossby(_x, 1.3), 'k--')
ax.plot(_x, constant_rossby(_x, 0.5), 'k--')
dist = abs(lam["Prot"] - constant_rossby(lam["Teff_lam"], 1.3))
frac_dist = abs(lam["Prot"] - constant_rossby(lam["Teff_lam"], 1.3))/constant_rossby(lam["Teff_lam"], 1.3)
lam_ridge = (frac_dist<0.05) & (lam["Teff_lam"]>5500) & (lam["Teff_lam"]<6500) & (lam["logg_lam"]>4.1) & (lam["logg_lam"]<4.75)
ax.plot(lam["Teff_lam"][lam_ridge], lam["Prot"][lam_ridge], 'o', mfc="None", color='white', alpha=0.2);
sns.set(font_scale=1.1, context="paper", style="ticks")
mpl.rcParams["legend.markerscale"] = 2
all_f = mask & (cks['p20_cks_steff']>5800) & (cks['p20_cks_slogg']<4.5) & (cks['p20_cks_steff']<6500)
fig,axes=plt.subplots(nrows=2,ncols=2,figsize=(8,6))
ax = axes[0]
ax[0].plot(cks['p20_cks_steff'], cks['p20_cks_slogg'], '.', color='lightgrey', label='California–Kepler Survey', rasterized=True, ms=2)
ax[0].plot(cks['p20_cks_steff'][ridge], cks['p20_cks_slogg'][ridge], '.', color='k', label='Long-period pile-up', rasterized=True, ms=2)
ax[0].errorbar(5000, 4.8, xerr=np.nanmedian(cks['p20_cks_steff_err1']),
yerr=np.nanmedian(cks['p20_cks_slogg_err1']), fmt='.', color='k', zorder=1000)
ax[1].plot(lam['Teff_lam'], lam['logg_lam'],
'.', color='lightgrey', label='LAMOST–McQuillan', rasterized=True, ms=2)
ax[1].plot(lam['Teff_lam'][lam_ridge], lam['logg_lam'][lam_ridge],
'.', color='k', label='Long-period pile-up', rasterized=True, ms=2)
ax[1].errorbar(5000, 4.8, xerr=np.nanmedian(lam['e_Teff_lam']),
yerr=np.nanmedian(lam['e_logg_lam']), fmt='.', color='k', zorder=1000)
ax[0].plot([5850,6500,6500,5850,5850],[4.1,4.1,4.75,4.75,4.1],color='k',lw=0.5)
ax[1].plot([5500,6500,6500,5500,5500],[4.1,4.1,4.75,4.75,4.1],color='k',lw=0.5)
for i in range(2):
ax[i].set_xlim(6750,4500)
ax[i].set_ylim(5,3)
ax[i].set_ylabel('log(g) [dex]')
ax[i].set_xlabel('Effective temperature [K]')
ax[i].text(4900, 4.9, 'typical\nerror', size=10)
ax[i].plot(sun["teff"], sun["logg"], 'o', color='orange', label='Sun')
lgnd = ax[i].legend(prop={'size':10}, loc='upper left')
for i,let in enumerate("ab"):
ax[i].text(1.05,1.05,let,transform=ax[i].transAxes,weight='bold')
ax = axes[1]
cks['cks_e_age'] = cks['cks_age'] - (10.**(cks['cks_logAiso']-cks['cks_e_logAiso'])/1.0e9)
cks['cks_E_age'] = (10.**(cks['cks_logAiso']+cks['cks_E_logAiso'])/1.0e9) - cks['cks_age']
# ax[0].errorbar(cks['p20_cks_steff'], cks['cks_age'],
# xerr=[cks['cks_e_Teff'], cks['cks_E_Teff']],
# yerr=[cks['cks_e_age'], cks['cks_E_age']], fmt='o', color='lightgrey',mec='lightgrey', linewidth=0, ecolor='lightgrey', zorder=1, alpha=0.5)
# ax[0].errorbar(cks['p20_cks_steff'][ridge], cks['cks_age'][ridge],
# xerr=[cks['cks_e_Teff'][ridge], cks['cks_E_Teff'][ridge]],
# yerr=[cks['cks_e_age'][ridge], cks['cks_E_age'][ridge]], fmt='o', mec='white', linewidth=0, color='k', ecolor='k', zorder=2)
cks_age_err = np.max([np.nanmedian(cks['cks_e_age']), np.nanmedian(cks['cks_E_age'])])
cks_teff_err = np.nanmedian(cks['p20_cks_steff_err1'])
spocs_age_err = np.max([np.nanmedian(cks['bf18_e_Age']), np.nanmedian(cks['bf18_E_Age'])])
spocs_teff_err = np.nanmedian(cks['bf18_e_Teff'])
ax[0].plot(cks['p20_cks_steff'], cks['cks_age'],
'.', color='lightgrey', zorder=1)
#Plot models
for i in range(2):
ax[i].plot(roc['Teff'][roc['ro']>2], roc['age'][roc['ro']>2],
',', ms=0.2, color='orange', alpha=0.5, zorder=1, rasterized=True)
ax[i].plot(roc['Teff'][roc['ro']<2], roc['age'][roc['ro']<2],
',', ms=0.2, color='C0', alpha=0.5, zorder=1, rasterized=True)
ax[0].plot(cks['p20_cks_steff'][ridge], cks['cks_age'][ridge],
'.', color='k', zorder=2)
ax[0].errorbar(6500, 10, xerr=cks_teff_err,
yerr=cks_age_err, fmt='.', color='k', zorder=1000)
ax[0].set_ylabel('CKS Age [Gyr]')
ax[0].set_xlabel('CKS Effective temperature [K]')
# ax[1].errorbar(cks['p20_cks_steff'], cks['bf18_Age'],
# xerr=[cks['cks_e_Teff'], cks['cks_E_Teff']],
# yerr=[cks['bf18_e_Age'], cks['bf18_E_Age']], fmt='o', color='lightgrey', mec='lightgrey', linewidth=0, ecolor='lightgrey', alpha=0.5, zorder=1)
# ax[1].errorbar(cks['p20_cks_steff'][ridge], cks['bf18_Age'][ridge],
# xerr=[cks['cks_e_Teff'][ridge], cks['cks_E_Teff'][ridge]],
# yerr=[cks['bf18_e_Age'][ridge], cks['bf18_E_Age'][ridge]], fmt='o', mec='white', linewidth=0, color='k', ecolor='k', zorder=2)
ax[1].plot(cks['bf18_Teff'], cks['bf18_Age'],
'.', color='lightgrey', zorder=1)
ax[1].plot(cks['bf18_Teff'][ridge], cks['bf18_Age'][ridge],
'.', color='k', zorder=2)
ax[1].errorbar(6500, 10, xerr=spocs_teff_err,
yerr=spocs_age_err, fmt='.', color='k', zorder=1000)
ax[1].set_ylabel('SPOCS Age [Gyr]')
ax[1].set_xlabel('SPOCS Effective temperature [K]')
for i in range(2):
ax[i].set_xlim(6600,5600)
ax[i].set_ylim(0,12)
ax[i].scatter(sun["teff"], 4.567, color='orange', zorder=3)
ax[i].text(6500, 11.5, 'typical error', size=10)
for i,let in enumerate("cd"):
ax[i].text(1.05,1.05,let,transform=ax[i].transAxes,weight='bold')
plt.tight_layout()
sns.despine()
plt.savefig('../figures/ages.pdf')
print('5th and 95th percentile range of CKS ages (Gyr) :', np.nanpercentile(cks['cks_age'][ridge], [5,95]))
print('5th and 95th percentile range of SPOCS ages (Gyr) :', np.nanpercentile(cks['bf18_Age'][ridge], [5,95]))
```
#### File: src/figures/gap.py
```python
import numpy as np
import pandas as pd
import astropy.constants as c
from astropy.table import Table
from astropy.table import join
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams["figure.dpi"] = 100
mpl.rcParams["savefig.bbox"] = "tight"
mpl.rcParams["savefig.dpi"] = 300
import seaborn as sns
sun = {"teff": 5772,
"prot":25.4,
"e_prot":0,
"E_prot":36-25.4
}
sun["logg"] = np.log10(c.GM_sun.cgs.value/c.R_sun.cgs.value**2)
######################################################################################
#McQuillan et al. 2014
# mcq = Table.read('../data/mcquillan2014/table1.dat',
# readme='../data/mcquillan2014/ReadMe',
# format='ascii.cds')
# mcq = mcq.to_pandas()
# mcq = mcq.add_prefix('mcq_')
mcq = pd.read_parquet('../data/mcquillan2014_table1.parquet')
######################################################################################
######################################################################################
#Gaia-Kepler cross-match from Megan Bedell
gk = pd.read_parquet('../data/kepler_dr2_1arcsec.parquet')
######################################################################################
######################################################################################
# LAMOST-Kepler
lam = pd.read_csv('../data/kepler_lamost.csv')
print('LAMOST unique KIC targets:', len(np.unique(lam["KIC"])))
print('LAMOST unique DR2 targets:', len(np.unique(lam["DR2Name"])))
# Drop duplicate sources, keeping the one with the brighter G magnitude
lam = lam.sort_values(["KIC", "Gmag"], ascending = (True, True))
#Merge
lam = lam.merge(mcq, how="left", left_on="KIC", right_on="mcq_KIC")
lam = lam.merge(gk, how="left", left_on="KIC", right_on="kepid")
lam = lam.drop_duplicates(subset=['KIC'], keep='first')
lam_mask = (lam["Teff_lam"]>3000)
lam_mask &= (lam["Teff_lam"]<8000)  # use &= so the lower Teff cut is not discarded
lam_mask &= (lam["logg_lam"]>3)
lam_mask &= (lam["logg_lam"]<5)
lam_mask &= (abs(lam["feh_lam"])<2)
#lam_mask &= (lam["mcq_Rper"]>2)
#lam_mask &= (lam["phot_g_mean_mag"]<15)
#lam_mask &= (lam["r_est"]>0.) & (lam["r_est"]<500.)
lam = lam[lam_mask]
print('LAMOST unique KIC targets:', len(np.unique(lam["KIC"])))
print('LAMOST unique DR2 targets:', len(np.unique(lam["DR2Name"])))
print('Median LAMOST Teff error:', np.median(lam["e_Teff_lam"]))
######################################################################################
######################################################################################
#bk = pd.read_csv("../data/_kim_2010/-kim-2010.csv")
def convective_turnover_timescale(teff,
ref='gunn1998'):
#Returns convective turnover timescale in days
if ref == 'gunn1998':
#Gunn et al. 1998 relation, from Cranmer & Saar 2011
return 314.24*np.exp( -(teff/1952.5) - (teff/6250.)**18. ) + 0.002
# elif ref == '2010':
# # & Kim 2010 relation for local tau_c
# teff_pts = 10.**bk['logT']
# tc_pts = bk['Local_tau_c']
# return np.interp(teff, teff_pts, tc_pts)
def constant_rossby(teff, ro):
#Return locus of rotation periods corresponding to constant Rossby number
return ro * convective_turnover_timescale(teff)
lam["Ro"] = lam["Prot"]/convective_turnover_timescale(lam["Teff_lam"], ref='gunn1998')
#lam["Ro_"] = lam["Prot"]/convective_turnover_timescale(lam["Teff_lam"], ref='2010')
mcq["Ro"] = mcq["mcq_Prot"]/convective_turnover_timescale(mcq["mcq_Teff"], ref='gunn1998')
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
bottom = cm.get_cmap('Oranges', 128)
top = cm.get_cmap('Blues_r', 128)
newcolors = np.vstack((top(np.linspace(0, 1, 128)),
bottom(np.linspace(0, 1, 128))))
OrBu = ListedColormap(newcolors, name='OrangeBlue')
sns.set(font_scale=1.6, context="paper", style="ticks")
sc_kws = {"marker":".",
"alpha": 0.5,
"rasterized":True,
"cmap": OrBu,
"s": 40,
"vmin":3,
"vmax":4.5,
"lw":0}
fig,(ax1,ax2) = plt.subplots(nrows=1, ncols=2,
figsize=(14,5))
logrper = np.log10(lam["mcq_Rper"])
lam_arg = (abs(np.log10(lam["Ro"]))<1)
sns.kdeplot(lam["Teff_lam"],
lam["Prot"],
bw_adjust=0.6, ax=ax1, lw=0.1, color='k', alpha=0.5)
sns.kdeplot(mcq["mcq_Teff"],
mcq["mcq_Prot"],
bw_adjust=0.6, ax=ax2, lw=0.1, color='k', alpha=0.5)
cb1 = ax1.scatter(lam["Teff_lam"],
lam["Prot"],
c=logrper, **sc_kws)
cb2 = ax2.scatter(mcq["mcq_Teff"],
mcq["mcq_Prot"],
c=np.log10(mcq["mcq_Rper"]),
**sc_kws)
for ax in [ax1,ax2]:
_teff = np.linspace(3000,7000,1000)
ax.plot(_teff, constant_rossby(_teff, 0.5), '--', color='k', lw=2, alpha=0.5)
ax.set_xlim(7000,3000)
ax.set_ylim(0,50)
ax.set_xlabel("Effective temperature [K]")
ax.set_ylabel("Rotation period [d]")
ax1.set_title('LAMOST–McQuillan')
ax2.set_title('McQuillan et al. 2014')
plt.colorbar(cb1, label=r'log(R$_\mathregular{per}$/ppm) [dex]', ax=ax1)
plt.colorbar(cb2, label=r'log(R$_\mathregular{per}$/ppm) [dex]', ax=ax2)
plt.tight_layout()
sns.despine()
plt.savefig('../figures/gap.pdf')
#plt.show()
``` |
{
"source": "john-livingston/stardate",
"score": 2
} |
#### File: paper/code/parallel.py
```python
import os
import sys
import numpy as np
import pandas as pd
import h5py
import tqdm
import emcee
# from isochrones.mist import MIST_Isochrone
# mist = MIST_Isochrone()
# from isochrones import StarModel, get_ichrone
# bands = ["B", "V", "J", "H", "K"]
# mist = get_ichrone("mist", bands=bands)
import stardate as sd
from stardate.lhf import lnprob
from multiprocessing import Pool
# Necessary to add cwd to path when script run
# by SLURM (since it executes a copy)
sys.path.append(os.getcwd())
# def infer_stellar_age(row):
# df = row[1]
def infer_stellar_age(df):
# Small observational uncertainties are needed (even though the stars
# weren't simulated with any) in order to get a good fit.
teff_err = 25 # Kelvin
logg_err = .05 # dex
feh_err = .05 # dex
jmag_err = .01 # mags
hmag_err = .01 # mags
kmag_err = .01 # mags
B_err, V_err, bp_err, rp_err = .01, .01, .01, .01
parallax_err = .05 # milliarcseconds
prot_err = 1 # Days
BV_err = .01 # mags
# Infer ages of the simulated stars.
# Set up the parameter dictionary.
iso_params = {"teff": (df["teff"], teff_err),
"logg": (df["logg"], logg_err),
"feh": (df["feh"], feh_err),
"J": (df["jmag"], jmag_err),
"H": (df["hmag"], hmag_err),
"K": (df["kmag"], kmag_err),
"B": (df["B"], B_err),
"V": (df["V"], V_err),
"G": (df["G"], bp_err),
"BP": (df["BP"], bp_err),
"RP": (df["RP"], rp_err),
"parallax": (df["parallax"], parallax_err),
"maxAV": .1}
# Infer an age with isochrones and gyrochronology.
try:
sd_fn = "{}_stardate".format(str(int(df["ID"])).zfill(4))
iso_fn = "{}_isochrones".format(str(int(df["ID"])).zfill(4))
if not os.path.exists(sd_fn):
# Set up the star object
star = sd.Star(iso_params, prot=df["prot"], prot_err=.01,
filename=sd_fn)
# Run the MCMC
sampler = star.fit(max_n=300000)
else:
print("failed to save file for star. File exists: ", sd_fn)
# # Now infer an age with isochrones only.
# if not os.path.exists(iso_fn):
# # Set up the star object
# star_iso = sd.Star(iso_params, None, None, filename=iso_fn)
# # Run the MCMC
# sampler = star_iso.fit(max_n=200000, iso_only=True)
# else:
# print("failed to save file for star. File exists: ", iso_fn)
except:
print("failed to save file for star ", str(int(df["ID"])).zfill(4))
if __name__ == "__main__":
# Load the simulated data file.
df = pd.read_csv("data/simulated_data.csv")
# df = df.iloc[5:6]
assert len(df.ID) == len(np.unique(df.ID))
ids = np.array([69, 132, 139, 178, 190, 216, 240, 246, 296, 325, 330, 349,
443, 496])
df = df.iloc[ids]
list_of_dicts = []
for i in range(len(df)):
list_of_dicts.append(df.iloc[i].to_dict())
print(list_of_dicts[0])
print(len(list_of_dicts))
p = Pool(14)
# list(p.map(infer_stellar_age, list_of_dicts))
list(p.map(infer_stellar_age, list_of_dicts))
# p.map(infer_stellar_age, df.iterrows())
```
#### File: stardate/stardate/lhf.py
```python
import numpy as np
from isochrones.mist import MIST_Isochrone
from isochrones import StarModel, get_ichrone
# mist = MIST_Isochrone(bands)
bands = ["B", "V", "J", "H", "K", "BP", "RP", "G"]
mist = get_ichrone("mist", bands=bands)
import emcee
import h5py
def gyro_model(log10_age, bv):
"""Predict a rotation period from an age and B-V colour.
Given a B-V colour and an age, predict a rotation period using the Angus
et al. (2015) gyrochronology model.
Args:
log10_age (float or array): The logarithmic age of a star or stars,
log10(age), in years.
bv (float or array): The B-V colour of a star or stars.
Returns:
        The log10 rotation period in days (0 is returned for stars with B-V < 0.45).
"""
age_myr = (10**log10_age)*1e-6
a, b, c, n = [.4, .31, .45, .55]
if bv < c:
return 0
else:
return (n*np.log10(age_myr) + np.log10(a) + b*np.log10(bv-c))
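# Worked example: assuming a solar B-V of ~0.65 and the Sun's age
# (log10(4.567e9) ~ 9.66), 10**gyro_model(9.66, 0.65) comes out at roughly
# 25 days, close to the observed solar rotation period, which is a quick
# sanity check on the hard-coded (a, b, c, n) values.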
def gk_rotation_model(log10_age, bprp):
"""
Predicts log10 rotation period from log10 color and log10 age.
Only applicable to GK dwarfs.
Args:
log10_age (float): The (log10) age.
bprp (float): The G_bp - G_rp color.
Returns:
log10_period (float): The period.
"""
log10_bprp = np.log10(bprp)
# Parameters with Solar bp - rp = 0.82
p = [-38.957586198640314, 28.709418579540294, -4.919056437046026,
0.7161114835620975, -4.716819674578521, 0.6470950862322454,
-13.558898318835137, 0.9359250478865809]
return np.polyval(p[:5], log10_bprp) + p[5]*log10_age
def gk_age_model(log10_period, bprp):
"""
Predicts log10 age from log10 color and log10 period.
Only applicable to GK dwarfs.
Args:
log10_period (array): The (log10) period array.
log10_bprp (array): The (log10) G_bp - G_rp color array.
Returns:
log10_age (array): The (log10) age array.
"""
log10_bprp = np.log10(bprp)
# Hard-code the gyro parameters :-)
p = [-38.957586198640314, 28.709418579540294, -4.919056437046026,
0.7161114835620975, -4.716819674578521, 0.6470950862322454,
-13.558898318835137, 0.9359250478865809]
logage = (log10_period - np.polyval(p[:5], log10_bprp))/p[5]
return logage
def gyro_model_praesepe(log10_age, bprp):
"""
Predicts log10 rotation period from log10 color and log10 age.
Args:
log10_age (float): The (log10) age.
bprp (float): The G_bp - G_rp color.
Returns:
log10_period (float): The period.
"""
# Log rotation period is zero if the star is very hot.
# Don't try to take log of negative number.
if bprp < 0.:
return .56
log10_bprp = np.log10(bprp)
# Hard-code the gyro parameters :-)
# c4, c3, c2, c1, c0, cA, b1, b0
# Parameters with Solar bp - rp = 0.82
p = [-38.957586198640314, 28.709418579540294, -4.919056437046026,
0.7161114835620975, -4.716819674578521, 0.6470950862322454,
-13.558898318835137, 0.9359250478865809]
# Parameters with Solar bp - rp = 0.77
# p = [-38.982347111370984, 28.706848179526098, -4.922906414784183,
# 0.7176636876966253, -5.489008990829778, 0.7347258099244045,
# -13.55785651951684, 0.16105197784241776]
if log10_bprp >= .43:
return np.polyval(p[6:], log10_bprp) + p[5]*log10_age
elif log10_bprp < -.25:
return 0.56
else:
return np.polyval(p[:5], log10_bprp) + p[5]*log10_age
def age_model(log10_period, bprp):
"""
Predicts log10 age from log10 color and log10 period.
Args:
log10_period (array): The (log10) period array.
log10_bprp (array): The (log10) G_bp - G_rp color array.
Returns:
log10_age (array): The (log10) age array.
"""
# If star is very hot, return the age of the Universe.
# Don't try to take the log of a negative number.
if bprp < 0:
return 10.14
log10_bprp = np.log10(bprp)
# Hard-code the gyro parameters :-)
p = [-38.957586198640314, 28.709418579540294, -4.919056437046026,
0.7161114835620975, -4.716819674578521, 0.6470950862322454,
-13.558898318835137, 0.9359250478865809]
# p = [-38.982347111370984, 28.706848179526098, -4.922906414784183,
# 0.7176636876966253, -5.489008990829778, 0.7347258099244045,
# -13.55785651951684, 0.16105197784241776]
if log10_bprp >= .43:
# return (log10_period - np.polyval(p[6:], log10_bprp))/p[5]
return 10.14 # The age of the universe
elif log10_bprp < -.25:
return 10.14
else:
logage = (log10_period - np.polyval(p[:5], log10_bprp))/p[5]
return logage
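# Note: for colours with -0.25 <= log10(bprp) < 0.43, age_model is the
# algebraic inverse of gyro_model_praesepe, so
# age_model(gyro_model_praesepe(t, bprp), bprp) recovers t to floating-point
# precision; outside that colour range it falls back to log10 of the age of
# the Universe (10.14).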
def gyro_model_rossby(params, Ro_cutoff=2, rossby=True, model="praesepe"):
"""Predict a rotation period from parameters EEP, age, feh, distance, Av.
Args:
params (array): The stellar parameters: EEP, log10(age), [Fe/H],
distance and Av.
Ro_cutoff (float, optional): The critical Rossby number after which
stars retain their rotation period. This is 2.16 in van Saders et
al. (2016) and 2.08 in van Saders et al. (2018). We adopt a
default value of 2.
rossby (Optional[bool]): If True (default), the van Saders (2016)
weakened magnetic braking law will be implemented. If false, the
gyrochronology relation will be used unmodified.
        model (Optional[str]): The gyrochronology model. If "praesepe", the
Praesepe-based gyro model will be used (default) and if "angus15",
the Angus et al. (2015) model will be used.
Returns:
The log10(rotation period) and the period standard deviation in dex.
"""
if model == "angus15":
color = calc_bv(params)
elif model == "praesepe":
color = calc_bprp(params)
mass = mist.interp_value([params[0], params[1], params[2]], ["mass"])
# If color is nan, return nan. This should be caught by the lhf.
if np.isfinite(color) == False:
return np.nan, np.nan
# Calculate the additional sigma
sig = sigma(params[0], params[1], params[2], color, model=model)
log_P = period_model(color, mass, params[1], Ro_cutoff=Ro_cutoff,
rossby=rossby, model=model)
return log_P, sig
def period_model(color, mass, age, Ro_cutoff=2, rossby=True,
model="praesepe"):
"""Predict a rotation period from an age, color and mass.
Predict a rotation period from an age, color and mass using either the
Angus et al. (2019) Praesepe model or the Angus et al. (2015)
gyrochronology model with the van Saders et al. (2016) weakened magnetic
braking correction.
Args:
color (float): Either a star's Gaia G_BP - G_RP color, if using the
praesepe model, or its B-V color, if using the Angus 2015 model.
mass (float): Stellar mass in Solar units.
age (float): log10 stellar age in years.
Ro_cutoff (float, optional): The critical Rossby number after which
stars retain their rotation period. This is 2.16 in van Saders et
al. (2016) and 2.08 in van Saders et al. (2018). We adopt a
default value of 2.
rossby (Optional[bool]): If True (default), the van Saders (2016)
weakened magnetic braking law will be implemented. If false, the
gyrochronology relation will be used unmodified.
        model (Optional[str]): The gyrochronology model. If "praesepe", the
Praesepe-based gyro model will be used (default) and if "angus15",
the Angus et al. (2015) model will be used.
Returns:
The log10(rotation period) and the standard deviation in dex.
"""
if not rossby: # If Rossby model is switched off
# Standard gyro model
if model == "angus15":
log_P = gyro_model(age, color)
elif model == "praesepe":
log_P = gyro_model_praesepe(age, color)
return log_P
# Otherwise the Rossby model is switched on.
# Calculate the maximum theoretical rotation period for this mass.
pmax = Ro_cutoff * convective_overturn_time(mass)
# Calculate the age this star reaches pmax, based on its B-V color.
if model == "angus15":
# Angus et al. (2015) parameters.
a, b, c, n = [.4, .31, .45, .55]
if color < c:
log10_age_thresh = 10.14 # The age of the Universe
else:
age_thresh_myr = (pmax/(a*(color-c)**b))**(1./n)
log10_age_thresh = np.log10(age_thresh_myr*1e6)
elif model == "praesepe":
log10_age_thresh = age_model(np.log10(pmax), color)
# If star younger than critical age, predict rotation from age and color.
if age < log10_age_thresh:
if model == "angus15":
log_P = gyro_model(age, color)
elif model == "praesepe":
log_P = gyro_model_praesepe(age, color)
# If star older than this age, return maximum possible rotation period.
elif age >= log10_age_thresh:
log_P = np.log10(pmax)
return log_P
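# Worked example: with the default Ro_cutoff = 2, a solar-mass star has
# pmax = 2 * convective_overturn_time(1.0), roughly 29 days (see the
# Wright et al. 2011 relation below), so once the gyrochronal prediction
# exceeds that value, period_model returns log10(pmax) and the period no
# longer grows with age.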
def calc_bv(mag_pars):
"""Calculate a B-V colour from stellar parameters.
Calculate B-V colour from stellar parameters [EEP, log10(age, yrs), feh,
distance (in parsecs) and extinction] using MIST isochrones.
Args:
mag_pars (list): A list containing EEP, log10(age) in years,
metallicity, distance in parsecs and V-band extinction, Av, for a
star.
Returns:
B-V color.
"""
_, _, _, bands = mist.interp_mag([*mag_pars], ["B", "V"])
B, V = bands
return B-V
def calc_bprp(mag_pars):
"""Calculate a G_bp-G_rp colour from stellar parameters.
Calculate bp-rp colour from stellar parameters [EEP, log10(age, yrs), feh,
distance (in parsecs) and extinction] using MIST isochrones.
Args:
mag_pars (list): A list containing EEP, log10(age) in years,
metallicity, distance in parsecs and V-band extinction, Av, for a
star.
Returns:
G_bp - G_rp color.
"""
_, _, _, bands = mist.interp_mag([*mag_pars], ["BP", "RP"])
bp, rp = bands
return bp - rp
# def lnprior(params):
# """ logarithmic prior on parameters.
# The (natural log) prior on the parameters. Takes EEP, log10(age) in years,
# metallicity (feh), distance in parsecs and V-band extinction (Av).
# Args:
# params (array-like): An array of EEP, age, feh, distance and
# extinction.
# Returns:
# The prior probability for the parameters.
# """
# # finite_mask = np.isfinite(params)
# # if sum(finite_mask) < len(params):
# # print(params, "non-finite parameter")
# # log Priors over age, metallicity and distance.
# # (The priors in priors.py are not in log)
# age_prior = np.log(priors.age_prior(params[1]))
# feh_prior = np.log(priors.feh_prior(params[2]))
# distance_prior = np.log(priors.distance_prior(params[3]))
# # Uniform prior on extinction.
# mAv = (0 <= params[4]) * (params[4] < 1) # Prior on A_v
# mAv &= np.isfinite(params[4])
# mAv = mAv == 1
# # Uniform prior on EEP
# m = (190 < params[0]) * (params[0] < 500) # Broad bounds on EEP.
# m &= np.isfinite(params[0])
# if mAv and m and np.isfinite(age_prior) and np.isfinite(distance_prior) \
# and np.isfinite(feh_prior):
# return age_prior + feh_prior + distance_prior
# else:
# return -np.inf
# def ptform(u):
# """
# Prior transform for sampling with dynesty.
# Args:
# u (array): The parameter array.
# Returns:
# u' (array): The parameters transformed from the unit cube to the prior
# space.
# """
# x = np.array(u)
# EEP between 100 and 800
# x[0] = 300*x[0] + 600 # x by range and + max
# x[0] = 700*x[0] + 800 # x by range and + max
# # Age between 0 and 13.8
# x[1] = np.log10(x[1]*13.8*1e9)
# # Fe/H between -5 and 5
# x[2] = x[2]*10 - 5
# # Distance uniform in log between 0 and 100 kpc
# x[3] = x[3]*np.log(100*1e3)
# # Av uniform between 0 and 1
# x[4] = x[4]
# return x
def lnprob(lnparams, *args):
""" The ln-probability function.
Calculates the logarithmic posterior probability (likelihood times prior)
of the model given the data.
Args:
lnparams (array): The parameter array containing Equivalent
Evolutionary Point (EEP), age in log10(yrs), metallicity, distance
in ln(pc) and V-band extinction. [EEP, log10(age [yrs]), [Fe/H],
ln(distance [pc]), A_v].
*args:
The arguments -- mod, period, period_err, iso_only, gyro_only,
rossby and model.
mod is the isochrones starmodel object which is set
up in stardate.py. period and period_err are the
rotation period and rotation period uncertainty (in days).
iso_only should be true if you want to use ONLY isochrone fitting
and not gyrochronology.
rossby is true if you want to use the van Saders + (2016) weakened
magnetic braking law. Set to false to turn this off.
model is "angus15" for the Angus + (2015) gyro model or "praesepe"
for the Praesepe gyro model.
Returns:
The log-posterior probability of the model given the data and the
log-prior.
"""
# transform mass and distance back to linear.
params = lnparams*1
params[3] = np.exp(lnparams[3])
# Unpack the args.
mod, period, period_err, iso_only, gyro_only, rossby, model = args
# Put a prior on EEP
if params[0] > 800: #2000:
return -np.inf, -np.inf
# If the prior is -inf, don't even try to calculate the isochronal
# likelihood.
lnpr = mod.lnprior(params)
if not np.isfinite(lnpr):
return -np.inf, -np.inf
like = lnlike(lnparams, *args)
return like + lnpr, lnpr
def lnlike(lnparams, *args):
""" The log-likelihood function.
Calculates the logarithmic likelihood of the data given the model.
Args:
lnparams (array): The parameter array containing Equivalent
Evolutionary Point (EEP), age in log10(yrs), metallicity, distance
in ln(pc) and V-band extinction. [EEP, log10(age [yrs]), [Fe/H],
ln(distance [pc]), A_v].
*args:
The arguments -- mod, period, period_err, iso_only, gyro_only,
rossby and model.
mod is the isochrones starmodel object which is set
up in stardate.py. period and period_err are the
rotation period and rotation period uncertainty (in days).
iso_only should be true if you want to use ONLY isochrone fitting
and not gyrochronology.
rossby is true if you want to use the van Saders + (2016) weakened
magnetic braking law. Set to false to turn this off.
model is "angus15" for the Angus + (2015) gyro model or "praesepe"
for the Praesepe gyro model.
Returns:
The log-likelihood
"""
# transform mass and distance back to linear.
params = lnparams*1
params[3] = np.exp(lnparams[3])
# Unpack the args.
mod, period, period_err, iso_only, gyro_only, rossby, model = args
# If isochrones only, just return the isochronal lhf.
if iso_only:
return mod.lnlike(params)
# Check that the period is a positive, finite number. It doesn't matter
# too much what the lhf is here, as long as it is constant.
if period is None or not np.isfinite(period) or period <= 0. \
or period_err is None or not np.isfinite(period_err) \
or period_err <= 0.:
gyro_lnlike, sig = -.5*((5/(20.))*2) - np.log(20.), 0
else:
# The gyrochronology lhf.
# The model
# Calculate a period using the gyrochronology model
log10_period_model, sig = gyro_model_rossby(params, rossby=rossby,
model=model)
# The variance model
relative_err = period_err/period
var = (relative_err*.434 + sig)**2
# The likelihood
# Calculate the gyrochronology likelihood.
gyro_lnlike = -.5*((log10_period_model - np.log10(period))**2/var) \
- .5*np.log(2*np.pi*var)
if gyro_only:
like = gyro_lnlike
else:
like = mod.lnlike(params) + gyro_lnlike
if not np.isfinite(like):
like = -np.inf
return float(like)
def nll(lnparams, args):
""" The negative ln-probability function.
Calculates the negative logarithmic posterior probability (likelihood times
prior) of the model given the data.
Args:
lnparams (array): The parameter array containing Equivalent
Evolutionary Point (EEP), age in log10(yrs), metallicity, distance
in ln(pc) and V-band extinction. [EEP, log10(age [yrs]), [Fe/H],
ln(distance [pc]), A_v].
*args:
The arguments -- mod, period, period_err, color, mass and iso_only.
mod is the isochrones starmodel object which is set
up in stardate.py. period, period_err, color and mass are the
rotation period and rotation period uncertainty (in days), B-V
color and mass [M_sun]. color and mass should both be None unless
only gyrochronology is being used.
Returns:
The negative log-posterior probability of the model given the data.
"""
lp, prior = lnprob(lnparams, *args)
return -lp
def convective_overturn_time(*args):
"""Estimate the convective overturn time.
Estimate the convective overturn time using equation 11 in Wright et al.
(2011): https://arxiv.org/abs/1109.4634
log tau = 1.16 - 1.49log(M/M⊙) - 0.54log^2(M/M⊙)
Args:
args: EITHER mass (float): Mass in Solar units OR eep (float):
The Equivalent evolutionary point of a star (355 for the Sun),
age (float): The age of a star in log_10(years) and feh (float):
the metallicity of a star.
Returns:
The convective overturn time in days.
"""
if len(args) > 1:
# Convert eep, age and feh to mass (mass will be in Solar mass units)
eep, age, feh = args
M = mist.interp_value([eep, age, feh], ["mass"])
else:
M = args[0]
log_tau = 1.16 - 1.49*np.log10(M) - .54*(np.log10(M))**2
return 10**log_tau
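# Worked example: for M = 1 Msun the relation gives log10(tau) = 1.16,
# i.e. tau ~ 14.5 days, so with Prot = 25.4 d the Sun's Rossby number under
# this calibration is roughly 25.4 / 14.5 ~ 1.8.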
def sigmoid(k, x0, L, x):
"""
Computes a sigmoid function.
Args:
k (float): The logistic growth rate (steepness).
x0 (float): The location of 1/2 max.
L (float): The maximum value.
x, (array): The x-array.
Returns:
y (array): The logistic function.
"""
return L/(np.exp(-k*(x - x0)) + 1)
def sigma(eep, log_age, feh, color, model="praesepe"):
"""
The standard deviation of the rotation period distribution.
Currently comprised of two three logistic functions that 'blow up' the
variance at hot colours, cool colours and large EEPs. The FGK dwarf part
of the model has zero variance.
Args:
eep (float): The equivalent evolutionary point.
log_age (float): The log10(age) in years.
feh (float): The metallicity.
color (float): The G_BP - G_RP colour if model == "praesepe" or the
B-V color if model == "angus15"
"""
kcool, khot, keep = 100, 100, .2
Lcool, Lhot, Leep = .5, .5, 5
x0eep = 454 #454 #100 #454
k_old, x0_old = 100, np.log10(10*1e9)
k_young, x0_young = 20, np.log10(250*1e6)
L_age = .5
# k_feh, L_feh, x0_feh = 5, .5, 3.
k_feh, L_feh, x0_feh = 50, .5, .2
# k_feh, L_feh, x0_feh = 50, .5, .25
if model == "angus15":
x0cool, x0hot = 1.4, .45
if color > 0:
sigma_color = sigmoid(kcool, x0cool, Lcool, color) \
+ sigmoid(khot, -x0hot, Lhot, -color)
else:
sigma_color = .5
elif model == "praesepe":
x0cool, x0hot = .4, .25
if color > 0:
sigma_color = sigmoid(kcool, x0cool, Lcool, np.log10(color)) \
+ sigmoid(khot, x0hot, Lhot, -np.log10(color))
else:
sigma_color = .5
    sigma_eep = sigmoid(keep, x0eep, Leep, eep)
    sigma_age = sigmoid(k_young, -x0_young, L_age, -log_age)
    # + sigmoid(k_old, x0_old, L_age, log_age)
sigma_feh = sigmoid(k_feh, x0_feh, L_feh, feh) \
+ sigmoid(k_feh, x0_feh, L_feh, -feh)
sigma_total = sigma_color + sigma_eep + sigma_feh + sigma_age
return sigma_total
def calc_rossby_number(prot, mass):
"""
Calculate the Rossby number of a star.
Args:
prot (float or array): The rotation period in days.
mass (float or array): The mass in Solar masses.
Returns:
Ro (float or array): The Rossby number.
"""
return prot/convective_overturn_time(mass)
``` |
{
"source": "John-L-Jones-IV/6.0001",
"score": 4
} |
#### File: 6.0001/ps2/test_hangman.py
```python
import unittest
from hangman import *
'''
This class is a unittest module for hangman.py
each test_<function> method is designed
to verify the functionality of the corresponding <function> in hangman.py
To use:
From the console in the project directory containing test_hangman.py, hangman.py, and words.txt
run:
python -m unittest test_hangman
The console output should tell you the result of each test
'''
class TestFindLetter(unittest.TestCase):
def test_is_word_guessed(self):
secret_word = 'apple'
letters_guessed = ['e', 'i', 'k', 'p', 'r', 's']
self.assertFalse(is_word_guessed(secret_word, letters_guessed))
secret_word = 'apple'
letters_guessed = ['a', 'p', 'l', 'e']
self.assertTrue(is_word_guessed(secret_word, letters_guessed))
def test_get_guessed_word(self):
secret_word = 'apple'
letters_guessed = ['e', 'i', 'k', 'p', 'r', 's']
self.assertEqual(get_guessed_word(secret_word,letters_guessed),'_pp_e')
def test_get_available_letters(self):
letters_guessed = ['e', 'i', 'k', 'p', 'r', 's']
expected_letters = 'abcdfghjlmnoqtuvwxyz'
self.assertEqual(get_available_letters(letters_guessed),expected_letters)
letters_guessed = ['a','b','c','d','e']
expected_letters = 'fghijklmnopqrstuvwxyz'
self.assertEqual(get_available_letters(letters_guessed),expected_letters)
def test_match_with_gaps(self):
self.assertFalse(match_with_gaps('te_t', 'tact'))
self.assertFalse(match_with_gaps('a__le', 'banana'))
self.assertTrue(match_with_gaps('a__le','apple'))
self.assertFalse(match_with_gaps('a_ple', 'apple'))
self.assertTrue(match_with_gaps('r_d', 'red'))
self.assertTrue(match_with_gaps('r_d', 'rad'))
self.assertFalse(match_with_gaps('s_x', 'twelve'))
def test_show_possible_matches(self):
self.assertEqual(show_possible_matches("t__t",'debug'),\
"tact tart taut teat tent test text that tilt tint toot tort tout trot tuft twit")
self.assertEqual(show_possible_matches("abbbb_",'debug'), "No matches found")
self.assertEqual(show_possible_matches("a_pl_",'debug'), "ample amply")
def test_num_unique_chars(self):
self.assertEqual(num_unique_chars('aabbccdd'), 4)
self.assertEqual(num_unique_chars('abcd'), 4)
self.assertEqual(num_unique_chars('zyxw'), 4)
self.assertEqual(num_unique_chars("xxrrdda"), 4)
self.assertEqual(num_unique_chars('aAbBcC'),3)
```
#### File: 6.0001/ps2/test_Lect7.py
```python
from Lect7 import *
def test_abs():
""" Unit test for abs() """
failure = False
if not failure:
        print('SUCCESS')
print('Testing abs()...')
test_abs()
```
#### File: 6.0001/ps3/test_ps3.py
```python
from ps3 import *
#
# Test code
#
def test_get_word_score():
"""
Unit test for get_word_score
"""
failure=False
# dictionary of words and scores
words = {("", 7):0, ("it", 7):2, ("was", 7):54, ("weed", 6):176,
("scored", 7):351, ("WaYbILl", 7):735, ("Outgnaw", 7):539,
("fork", 7):209, ("FORK", 4):308}
for (word, n) in words.keys():
score = get_word_score(word, n)
if score != words[(word, n)]:
print("FAILURE: test_get_word_score()")
print("\tExpected", words[(word, n)], "points but got '" + \
str(score) + "' for word '" + word + "', n=" + str(n))
failure=True
if not failure:
print("SUCCESS: test_get_word_score()")
# end of test_get_word_score
def test_update_hand():
"""
Unit test for update_hand
"""
# test 1
handOrig = {'a':1, 'q':1, 'l':2, 'm':1, 'u':1, 'i':1}
handCopy = handOrig.copy()
word = "quail"
hand2 = update_hand(handCopy, word)
expected_hand1 = {'l':1, 'm':1}
expected_hand2 = {'a':0, 'q':0, 'l':1, 'm':1, 'u':0, 'i':0}
if hand2 != expected_hand1 and hand2 != expected_hand2:
print("FAILURE: test_update_hand('"+ word +"', " + str(handOrig) + ")")
print("\tReturned: ", hand2, "\n\t-- but expected:", expected_hand1, "or", expected_hand2)
return # exit function
if handCopy != handOrig:
print("FAILURE: test_update_hand('"+ word +"', " + str(handOrig) + ")")
print("\tOriginal hand was", handOrig)
print("\tbut implementation of update_hand mutated the original hand!")
print("\tNow the hand looks like this:", handCopy)
return # exit function
# test 2
handOrig = {'e':1, 'v':2, 'n':1, 'i':1, 'l':2}
handCopy = handOrig.copy()
word = "Evil"
hand2 = update_hand(handCopy, word)
expected_hand1 = {'v':1, 'n':1, 'l':1}
expected_hand2 = {'e':0, 'v':1, 'n':1, 'i':0, 'l':1}
if hand2 != expected_hand1 and hand2 != expected_hand2:
print("FAILURE: test_update_hand('"+ word +"', " + str(handOrig) + ")")
print("\tReturned: ", hand2, "\n\t-- but expected:", expected_hand1, "or", expected_hand2)
return # exit function
if handCopy != handOrig:
print("FAILURE: test_update_hand('"+ word +"', " + str(handOrig) + ")")
print("\tOriginal hand was", handOrig)
print("\tbut implementation of update_hand mutated the original hand!")
print("\tNow the hand looks like this:", handCopy)
return # exit function
# test 3
handOrig = {'h': 1, 'e': 1, 'l': 2, 'o': 1}
handCopy = handOrig.copy()
word = "HELLO"
hand2 = update_hand(handCopy, word)
expected_hand1 = {}
expected_hand2 = {'h': 0, 'e': 0, 'l': 0, 'o': 0}
if hand2 != expected_hand1 and hand2 != expected_hand2:
print("FAILURE: test_update_hand('"+ word +"', " + str(handOrig) + ")")
print("\tReturned: ", hand2, "\n\t-- but expected:", expected_hand1, "or", expected_hand2)
return # exit function
if handCopy != handOrig:
print("FAILURE: test_update_hand('"+ word +"', " + str(handOrig) + ")")
print("\tOriginal hand was", handOrig)
print("\tbut implementation of update_hand mutated the original hand!")
print("\tNow the hand looks like this:", handCopy)
return # exit function
print("SUCCESS: test_update_hand()")
# end of test_update_hand
def test_is_valid_word(word_list):
"""
Unit test for is_valid_word
"""
failure=False
# test 1
word = "hello"
handOrig = get_frequency_dict(word)
handCopy = handOrig.copy()
if not is_valid_word(word, handCopy, word_list):
print("FAILURE: test_is_valid_word()")
print("\tExpected True, but got False for word: '" + word + "' and hand:", handOrig)
failure = True
# Test a second time to see if word_list or hand has been modified
if not is_valid_word(word, handCopy, word_list):
print("FAILURE: test_is_valid_word()")
if handCopy != handOrig:
print("\tTesting word", word, "for a second time - be sure you're not modifying hand.")
print("\tAt this point, hand ought to be", handOrig, "but it is", handCopy)
else:
print("\tTesting word", word, "for a second time - have you modified word_list?")
wordInWL = word in word_list
print("The word", word, "should be in word_list - is it?", wordInWL)
print("\tExpected True, but got False for word: '" + word + "' and hand:", handCopy)
failure = True
# test 2
hand = {'r': 1, 'a': 3, 'p': 2, 'e': 1, 't': 1, 'u':1}
word = "Rapture"
if is_valid_word(word, hand, word_list):
print("FAILURE: test_is_valid_word()")
print("\tExpected False, but got True for word: '" + word + "' and hand:", hand)
failure = True
# test 3
hand = {'n': 1, 'h': 1, 'o': 1, 'y': 1, 'd':1, 'w':1, 'e': 2}
word = "honey"
if not is_valid_word(word, hand, word_list):
print("FAILURE: test_is_valid_word()")
print("\tExpected True, but got False for word: '"+ word +"' and hand:", hand)
failure = True
# test 4
hand = {'r': 1, 'a': 3, 'p': 2, 't': 1, 'u':2}
word = "honey"
if is_valid_word(word, hand, word_list):
print("FAILURE: test_is_valid_word()")
print("\tExpected False, but got True for word: '" + word + "' and hand:", hand)
failure = True
# test 5
hand = {'e':1, 'v':2, 'n':1, 'i':1, 'l':2}
word = "EVIL"
if not is_valid_word(word, hand, word_list):
print("FAILURE: test_is_valid_word()")
print("\tExpected True, but got False for word: '" + word + "' and hand:", hand)
failure = True
# test 6
word = "Even"
if is_valid_word(word, hand, word_list):
print("FAILURE: test_is_valid_word()")
print("\tExpected False, but got True for word: '" + word + "' and hand:", hand)
print("\t(If this is the only failure, make sure is_valid_word() isn't mutating its inputs)")
failure = True
if not failure:
print("SUCCESS: test_is_valid_word()")
# end of test_is_valid_word
def test_wildcard(word_list):
"""
Unit test for is_valid_word
"""
failure=False
# test 1
hand = {'a': 1, 'r': 1, 'e': 1, 'j': 2, 'm': 1, '*': 1}
word = "e*m"
if is_valid_word(word, hand, word_list):
print("FAILURE: test_is_valid_word() with wildcards")
print("\tExpected False, but got True for word: '" + word + "' and hand:", hand)
failure = True
# test 2
hand = {'n': 1, 'h': 1, '*': 1, 'y': 1, 'd':1, 'w':1, 'e': 2}
word = "honey"
if is_valid_word(word, hand, word_list):
print("FAILURE: test_is_valid_word() with wildcards")
print("\tExpected False, but got True for word: '"+ word +"' and hand:", hand)
failure = True
# test 3
hand = {'n': 1, 'h': 1, '*': 1, 'y': 1, 'd':1, 'w':1, 'e': 2}
word = "h*ney"
if not is_valid_word(word, hand, word_list):
print("FAILURE: test_is_valid_word() with wildcards")
print("\tExpected True, but got False for word: '"+ word +"' and hand:", hand)
failure = True
# test 4
hand = {'c': 1, 'o': 1, '*': 1, 'w': 1, 's':1, 'z':1, 'y': 2}
word = "c*wz"
if is_valid_word(word, hand, word_list):
print("FAILURE: test_is_valid_word() with wildcards")
print("\tExpected False, but got True for word: '"+ word +"' and hand:", hand)
failure = True
# dictionary of words and scores WITH wildcards
words = {("h*ney", 7):290, ("c*ws", 6):176, ("wa*ls", 7):203}
for (word, n) in words.keys():
score = get_word_score(word, n)
if score != words[(word, n)]:
print("FAILURE: test_get_word_score() with wildcards")
print("\tExpected", words[(word, n)], "points but got '" + \
str(score) + "' for word '" + word + "', n=" + str(n))
failure=True
if not failure:
print("SUCCESS: test_wildcard()")
def test_find_vowels():
failure = False
# test 1
word = 'abc'
index = find_vowels(word)
expected = [0]
if index != expected:
print("FAILURE: test_find_vowels()\nExpected", expected, "but got", index)
        failure = True
# test 2
word = 'zebra'
index = find_vowels(word)
expected = [1,4]
if index != expected:
print("FAILURE: test_find_vowels()\nExpected", expected, "but got", index)
        failure = True
# test 3
word = 'reddit'
index = find_vowels(word)
expected = [1,4]
if index != expected:
print("FAILURE: test_find_vowels()\nExpected", expected, "but got", index)
        failure = True
# test 4
word = 'aeioyu'
index = find_vowels(word)
expected = [0,1,2,3,5]
if index != expected:
print("FAILURE: test_find_vowels()\nExpected", expected, "but got", index)
        failure = True
# test 5
word = 'bzrtv'
index = find_vowels(word)
expected = []
if index != expected:
print("FAILURE: test_find_vowels()\nExpected", expected, "but got", index)
        failure = True
if not failure:
print('SUCCESS: test_find_vowels()')
def test_replace_vowels():
failure = False
# test 1
word = 'apple'
words = replace_vowels(word)
expected = ['*pple', 'appl*']
if words != expected:
print("FAILURE: test_replace_vowels()\nExpected", expected, "but got", words)
        failure = True
# test 2
word = 'reddit'
words = replace_vowels(word)
expected = ['r*ddit', 'redd*t']
if words != expected:
print("FAILURE: test_replace_vowels()\nExpected", expected, "but got", words)
        failure = True
# test 3
word = 'foo'
words = replace_vowels(word)
expected = ['f*o', 'fo*']
if words != expected:
print("FAILURE: test_replace_vowels()\nExpected", expected, "but got", words)
        failure = True
# test 4
word = 'bar'
words = replace_vowels(word)
expected = ['b*r']
if words != expected:
print("FAILURE: test_replace_vowels()\nExpected", expected, "but got", words)
        failure = True
# test 5
word = 'baz'
words = replace_vowels(word)
expected = ['b*z']
if words != expected:
print("FAILURE: test_replace_vowels()\nExpected", expected, "but got", words)
        failure = True
if not failure:
print('SUCCESS: test_replace_vowels()')
def test_calculate_handlen():
failure = False
# test 1
hand = {'a':1, 'p':2, 'l':1, 'e':1, 'f':1 , 'r':1, 'x':0 ,'y':0}
N = calculate_handlen(hand)
expected = 1+2+1+1+1+1
if N != expected:
print("FAILURE: test_calculate_handlen()\nExpected", expected, "but got", N)
        failure = True
# test 2
hand = {'a':61, 'p':2, 'l':1, 'e':1, 'f':1 , 'r':1, 'x':0 ,'y':0}
N = calculate_handlen(hand)
expected = 61+2+1+1+1+1
if N != expected:
print("FAILURE: test_calculate_handlen()\nExpected", expected, "but got", N)
        failure = True
# test 3
hand = {'a':0, 'p':0, 'l':0, 'e':0, 'f':0, 'r':0, 'x':0 ,'y':0}
N = calculate_handlen(hand)
expected = 0
if N != expected:
print("FAILURE: test_calculate_handlen()\nExpected", expected, "but got", N)
        failure = True
# test 4
hand = {'f':1, 'o':2, 'b':2, 'a':2, 'r':1, 'z':1 ,'y':0}
N = calculate_handlen(hand)
expected = 1+2+2+2+1+1
if N != expected:
print("FAILURE: test_calculate_handlen()\nExpected", expected, "but got", N)
        failure = True
# test 5
hand = {'f':1, 'o':2, 'b':2, 'a':2, 'r':1, 'z':1}
N = calculate_handlen(hand)
expected = 1+2+2+2+1+1
if N != expected:
print("FAILURE: test_calculate_handlen()\nExpected", expected, "but got", N)
        failure = True
if not failure:
print('SUCCESS: test_calculate_handlen()')
def test_substitute_hand():
failure = False
# test 1
hand = {'a':1, 'p':2, 'l':1, 'e':1, 'f':1 , 'r':1, 'x':0 ,'y':0}
N = calculate_handlen(hand)
expected = 1+2+1+1+1+1
if N != expected:
print("FAILURE: test_calculate_handlen()\nExpected", expected, "but got", N)
        failure = True
# test 2
hand = {'a':1, 'p':2, 'l':1, 'e':1}
N = calculate_handlen(hand)
expected = 1+2+1+1
if N != expected:
print("FAILURE: test_calculate_handlen()\nExpected", expected, "but got", N)
        failure = True
if not failure:
        print('SUCCESS: test_substitute_hand()')
def test_is_in_hand():
    failure = False
# test 1
hand = {'f':1, 'o':2}
letter = 'f'
b_result = is_in_hand(letter,hand)
expected = True
if expected != b_result:
print('FAILURE: test_is_in_hand()\nExpected', expected, 'but got', b_result)
failure = True
# test 2
hand = {'f':1, 'o':2}
letter = 'b'
b_result = is_in_hand(letter,hand)
expected = False
if expected != b_result:
print('FAILURE: test_is_in_hand()\nExpected', expected, 'but got', b_result)
failure = True
# test 3
hand = {'g':1, 'o':2, 's':1, 'x':1, 'y':0, 'a':0, 'n':0, 'k':0}
letter = 'n'
b_result = is_in_hand(letter,hand)
expected = False
if expected != b_result:
print('FAILURE: test_is_in_hand()\nExpected', expected, 'but got', b_result)
failure = True
# test 4
hand = {'g':1, 'o':2, 's':1, 'x':1, 'y':0, 'a':0, 'n':0, 'k':0}
letter = 'x'
b_result = is_in_hand(letter,hand)
expected = True
if expected != b_result:
print('FAILURE: test_is_in_hand()\nExpected', expected, 'but got', b_result)
failure = True
    if not failure:
print('SUCCESS: test_is_in_hand()')
def test_calculate_num_vowels():
failure = False
# test 1
hand = {'a':1, 'b':1, 'c':1, 'd':1, 'e': 1, 'f':1 }
n = 2
output = calculate_num_vowels(hand)
if output != n:
print('FAILURE: test_calculate_vowels()\nExpected', n,'but got',output)
display_hand(hand)
failure = True
# test 2
hand = {'a':1, 'b':1, 'c':1, 'd':1, 'e': 1, 'f':1, 'i': 2 }
n = 4
output = calculate_num_vowels(hand)
if output != n:
print('FAILURE: test_calculate_vowels()\nExpected', n,'but got',output)
display_hand(hand)
failure = True
# test 3
hand = {'b': 0, 'c':1, 'd':1, 'e': 1, 'f':1, 'i': 2, 'a':1, 'z':0, 'u':4 }
n = 8
output = calculate_num_vowels(hand)
if output != n:
print('FAILURE: test_calculate_vowels()\nExpected', n,'but got',output)
display_hand(hand)
failure = True
if not failure:
print('SUCCESS: test_calculate_num_vowels')
def test_deal_hand():
failure = False
fails = 0
# test 1 - test size of hand
N = 7
K = 100
for i in range(K):
hand = deal_hand(7)
if N != calculate_handlen(hand):
print('FAILURE: test_deal_hand()\nExpected', N, 'but got', calculate_handlen(hand))
failure = True
fails += 1
elif calculate_num_vowels(hand) != int(math.ceil(N/3)):
print('FAILURE: test_deal_hand()\nExpected', int(math.ceil(N/3)), 'vowels but got ', calculate_num_vowels(hand))
failure = True
fails += 1
if not failure:
print('SUCCESS: test_deal_hand()')
elif failure:
        print('test_deal_hand() percent failure:', fails/K)
word_list = load_words()
print("----------------------------------------------------------------------")
print("Testing get_word_score...")
test_get_word_score()
print("----------------------------------------------------------------------")
print("Testing update_hand...")
test_update_hand()
print("----------------------------------------------------------------------")
print("Testing is_valid_word...")
test_is_valid_word(word_list)
print("----------------------------------------------------------------------")
print("Testing wildcards...")
test_wildcard(word_list)
print("----------------------------------------------------------------------")
print("Testing find_vowels...")
test_find_vowels()
print("----------------------------------------------------------------------")
print("Testing replace_vowels...")
test_replace_vowels()
print("----------------------------------------------------------------------")
print("Testing calculate_handlen...")
test_calculate_handlen()
print("----------------------------------------------------------------------")
print("Testing substitute_hand...")
test_substitute_hand()
print("----------------------------------------------------------------------")
print("Testing calculate_num_vowels...")
test_calculate_num_vowels()
print("----------------------------------------------------------------------")
print("Testing is_in_hand...")
test_is_in_hand()
print("----------------------------------------------------------------------")
print("Testing deal_hand...")
test_deal_hand()
print("----------------------------------------------------------------------")
print("All done!")
``` |
{
"source": "John-L-Jones-IV/6.0002",
"score": 3
} |
#### File: 6.0002/ps1/test_ps1.py
```python
import unittest
from ps1a import *
from ps1b import *
class TestCows(unittest.TestCase):
def test_load_cows(self):
varin = 'test1.txt'
out = {'A':1,'B':2, 'C':3}
self.assertEqual(load_cows(varin), out)
varin = 'test2.txt'
out = {'Bob':109}
self.assertEqual(load_cows(varin), out)
varin = 'test3.txt'
out = {'Bob':5, 'Bill':1}
self.assertEqual(load_cows(varin), out)
varin = 'ps1_cow_data.txt'
out = {'Maggie':3,'Herman':7,'Betsy':9,'Oreo':6,'Moo Moo':3,'Milkshake':2,'Millie':5,'Lola':2,'Florence':2,'Henrietta':9}
self.assertEqual(load_cows(varin), out)
varin = 'ps1_cow_data_2.txt'
out = {'<NAME>':3, 'Milkshake':4,'Lotus':10,'<NAME>':2,'Horns':9,'Betsy':5,'Rose':3,'Dottie':6}
self.assertEqual(load_cows(varin), out)
def test_greedy_cow_transport(self):
cows = {'Bob':9, 'Sally': 8, 'Billy':3, 'Fred':3, 'Josh':3}
cows_cpy = cows.copy()
L =[['Bob'],['Sally'],['Billy','Fred','Josh']]
self.assertEqual(greedy_cow_transport(cows), L)
self.assertEqual(cows,cows_cpy)
cows = {'Bob':11, 'Sally': 8, 'Billy':3, 'Fred':3, 'Josh':3}
cows_cpy = cows.copy()
L =[['Sally'],['Billy','Fred','Josh']]
self.assertEqual(greedy_cow_transport(cows), L)
self.assertEqual(cows,cows_cpy)
cows = {'A':1, 'B': 2, 'C':3, 'D':4, 'E':5, 'F':6, 'G':7, 'H':8}
cows_cpy = cows.copy()
L =[['H','B'],['G','C'],['F','D'],['E','A']]
self.assertEqual(greedy_cow_transport(cows), L)
self.assertEqual(cows,cows_cpy)
cows = {'A':12, 'B': 13}
cows_cpy = cows.copy()
L =[['A']]
self.assertEqual(greedy_cow_transport(cows,limit = 12), L)
self.assertEqual(cows,cows_cpy)
cows = {'A':12, 'B': 11}
cows_cpy = cows.copy()
L =[['A'],['B']]
self.assertEqual(greedy_cow_transport(cows,limit = 12), L)
self.assertEqual(cows,cows_cpy)
def test_brute_force_cow_transport(self):
cows = {'A':5, 'B':5, 'C':7}
cows_cpy = cows.copy()
L = [['C'],['A','B']]
self.assertEqual(greedy_cow_transport(cows), L)
self.assertEqual(cows,cows_cpy)
cows = {'A':8, 'B':3, 'C':7}
cows_cpy = cows.copy()
L = [['A'],['C','B']]
self.assertEqual(greedy_cow_transport(cows), L)
self.assertEqual(cows,cows_cpy)
```
#### File: 6.0002/ps4/ps4_tests.py
```python
import sys
import unittest
import numpy as np
import ps4
population = [[100, 115, 122, 129, 134, 138, 151, 167, 174, 183, 196, 208, 215, 223, 233,
240, 253, 268, 284, 294, 306, 316, 325, 338, 360, 372, 378, 388, 399, 415, 414, 431, 456,
477, 485, 493, 510, 530, 547, 569, 575, 580, 579, 588, 597, 605, 625, 626, 632, 640, 653,
660, 668, 681, 685, 690, 695, 691, 693, 689, 696, 706, 720, 717, 718, 713, 720, 723, 726,
731, 728, 721, 727, 731, 734, 741, 751, 748, 750, 750, 752, 752, 745, 753, 752, 756, 753,
745, 747, 747, 750, 745, 751, 759, 753, 754, 762, 765, 754, 764, 767, 769, 770, 775, 784,
787, 789, 786, 783, 773, 770, 764, 764, 767, 767, 768, 765, 765, 750, 753, 745, 745, 746,
753, 754, 763, 767, 777, 778, 784, 782, 782, 783, 788, 790, 782, 786, 792, 799, 792, 779,
778, 768, 768, 768, 775, 774, 783, 782, 778, 778, 789, 771, 775, 770, 780, 778, 780, 771,
765, 762, 758, 768, 762, 777, 774, 776, 779, 771, 768, 781, 783, 793, 801, 803, 798, 794,
798, 799, 801, 804, 802, 807, 795, 776, 773, 779, 775, 777, 783, 791, 787, 778, 782, 789,
782, 773, 775, 782, 779, 778, 774, 776, 782, 770, 773, 775, 772, 777, 772, 772, 774, 771,
760, 764, 766, 758, 759, 758, 745, 744, 754, 760, 770, 765, 764, 754, 769, 760, 762, 762,
765, 754, 762, 762, 764, 757, 762, 759, 758, 748, 752, 764, 758, 762, 761, 755, 747, 746,
744, 750, 748, 746, 756, 762, 758, 754, 758, 754, 747, 750, 752, 744, 741, 744, 756, 768,
773, 772, 768, 764, 762, 754, 761, 760, 749, 746, 744, 741, 748, 745, 751, 753, 744, 736,
746, 749, 749, 762, 756, 762, 762, 756, 761, 762, 762, 755, 763, 772, 761], [100, 113, 125,
129, 136, 151, 166, 177, 186, 196, 208, 215, 219, 235, 239, 257, 270, 288, 299, 310, 322, 335,
344, 354, 375, 395, 408, 429, 446, 451, 471, 497, 515, 528, 525, 542, 558, 567, 580, 593, 604,
613, 619, 628, 631, 645, 656, 676, 676, 685, 704, 711, 715, 724, 724, 725, 725, 725, 740, 737,
736, 752, 757, 759, 762, 762, 771, 759, 755, 754, 752, 752, 755, 765, 766, 766, 761, 766, 761,
752, 755, 756, 765, 769, 768, 770, 769, 772, 766, 770, 771, 773, 782, 771, 768, 767, 769, 781,
779, 780, 775, 772, 761, 759, 760, 762, 761, 763, 756, 758, 766, 759, 748, 751, 750, 750, 761,
756, 767, 776, 780, 780, 767, 762, 759, 760, 757, 761, 766, 770, 757, 758, 763, 759, 754, 746,
754, 760, 755, 758, 757, 769, 773, 773, 764, 770, 770, 770, 774, 768, 775, 779, 779, 769, 766,
766, 769, 759, 749, 756, 776, 770, 771, 761, 765, 766, 771, 783, 782, 774, 774, 771, 765, 753,
767, 770, 771, 769, 770, 767, 764, 757, 763, 769, 766, 767, 776, 773, 771, 775, 771, 776, 767,
756, 760, 764, 757, 753, 745, 745, 759, 751, 752, 749, 740, 748, 740, 740, 742, 740, 737, 744,
739, 744, 750, 753, 751, 750, 764, 775, 759, 762, 767, 772, 774, 781, 776, 772, 778, 785, 771,
762, 757, 752, 747, 754, 757, 757, 763, 766, 765, 758, 762, 760, 757, 765, 769, 764, 761, 762,
764, 762, 751, 752, 747, 747, 750, 752, 765, 771, 766, 765, 755, 751, 750, 743, 749, 750, 743,
752, 749, 736, 750, 749, 746, 754, 744, 743, 730, 730, 719, 721, 724, 731, 732, 735, 746, 740,
741, 750, 750, 740, 738, 741, 734, 728, 745, 740, 732, 738], [100, 112, 117, 130, 139, 149,
156, 169, 172, 189, 200, 216, 223, 233, 247, 257, 268, 280, 292, 302, 308, 323, 338, 346, 359,
379, 388, 390, 410, 427, 447, 462, 469, 485, 499, 521, 536, 548, 557, 555, 566, 571, 577, 580,
592, 607, 612, 620, 628, 629, 629, 635, 647, 657, 661, 672, 689, 694, 697, 713, 715, 720, 724,
734, 746, 749, 736, 740, 752, 763, 759, 751, 753, 749, 741, 743, 750, 751, 758, 769, 775, 784,
784, 786, 789, 790, 798, 800, 794, 802, 796, 801, 803, 791, 795, 785, 779, 768, 758, 752, 753,
749, 759, 763, 754, 754, 753, 761, 772, 765, 768, 769, 771, 772, 768, 766, 764, 761, 770, 771,
773, 771, 768, 760, 756, 759, 755, 763, 758, 753, 757, 756, 764, 765, 763, 768, 770, 776, 776,
776, 778, 765, 769, 760, 763, 759, 770, 772, 778, 768, 777, 779, 782, 777, 774, 783, 776, 771,
775, 766, 769, 767, 763, 759, 749, 751, 746, 747, 746, 740, 743, 749, 757, 750, 752, 762, 768,
771, 769, 779, 775, 779, 772, 777, 785, 784, 782, 793, 784, 786, 788, 780, 781, 779, 773, 778,
780, 774, 766, 767, 765, 764, 766, 770, 765, 776, 785, 785, 792, 788, 786, 790, 785, 788, 793,
793, 788, 792, 789, 774, 775, 769, 770, 770, 773, 775, 770, 769, 763, 758, 766, 776, 776, 776,
778, 771, 775, 777, 776, 770, 773, 767, 761, 765, 762, 770, 772, 775, 781, 779, 767, 766, 767,
763, 763, 755, 753, 751, 758, 761, 764, 771, 772, 762, 764, 758, 756, 754, 752, 752, 748, 753,
763, 766, 766, 758, 756, 752, 759, 753, 749, 754, 751, 750, 751, 749, 751, 747, 751, 753, 739,
747, 745, 747, 748, 746, 755, 755, 760, 766], [100, 106, 113, 111, 117, 124, 136, 139, 152,
154, 161, 168, 176, 182, 194, 210, 226, 239, 256, 274, 287, 297, 314, 329, 343, 355, 356, 362,
376, 394, 405, 421, 432, 448, 471, 497, 508, 520, 525, 530, 538, 560, 576, 595, 604, 619, 635,
654, 656, 672, 683, 683, 692, 705, 704, 706, 705, 703, 710, 710, 714, 712, 722, 736, 737, 730,
727, 735, 734, 743, 752, 757, 751, 755, 769, 764, 769, 763, 764, 767, 762, 753, 744, 751, 741,
733, 733, 729, 734, 733, 745, 748, 750, 751, 746, 755, 751, 754, 755, 750, 753, 752, 754, 757,
760, 767, 768, 761, 763, 752, 748, 747, 747, 749, 765, 771, 774, 765, 763, 760, 758, 756, 754,
752, 736, 744, 751, 760, 757, 756, 755, 773, 775, 769, 765, 768, 773, 779, 771, 778, 765, 766,
760, 754, 746, 747, 749, 756, 757, 757, 761, 758, 746, 739, 745, 748, 756, 764, 765, 772, 776,
778, 772, 780, 777, 772, 763, 764, 771, 777, 776, 775, 780, 769, 770, 765, 759, 761, 758, 762,
759, 766, 774, 769, 769, 770, 773, 773, 777, 770, 770, 769, 761, 760, 767, 766, 765, 762, 758,
763, 760, 767, 760, 761, 762, 766, 765, 778, 776, 782, 773, 770, 782, 778, 776, 770, 767, 766,
755, 756, 753, 747, 744, 759, 760, 742, 746, 744, 748, 762, 759, 762, 770, 774, 784, 773, 763,
749, 742, 747, 731, 728, 731, 736, 745, 743, 737, 736, 736, 739, 739, 743, 740, 748, 760, 754,
757, 765, 772, 766, 767, 764, 751, 750, 750, 750, 753, 763, 767, 762, 765, 768, 774, 770, 768,
766, 765, 752, 745, 749, 751, 750, 750, 753, 747, 755, 762, 762, 770, 762, 756, 754, 754, 757,
763, 760, 752, 753, 765, 770], [100, 109, 121, 127, 135, 146, 150, 160, 167, 180, 196, 206,
226, 244, 254, 263, 277, 303, 310, 321, 325, 342, 356, 372, 383, 394, 407, 418, 422, 430, 459,
477, 485, 504, 517, 518, 520, 532, 542, 558, 574, 594, 607, 602, 606, 615, 628, 636, 654, 660,
656, 660, 662, 673, 684, 686, 698, 714, 715, 723, 727, 739, 736, 733, 741, 744, 744, 742, 751,
757, 758, 753, 754, 755, 758, 757, 763, 757, 754, 743, 740, 738, 739, 740, 739, 745, 739, 741,
736, 726, 737, 737, 740, 749, 750, 756, 754, 761, 774, 783, 781, 781, 773, 759, 754, 752, 754,
761, 749, 740, 739, 732, 727, 730, 744, 753, 763, 753, 752, 753, 761, 759, 759, 753, 743, 749,
743, 730, 734, 735, 737, 748, 756, 760, 754, 752, 758, 756, 758, 758, 764, 754, 756, 750, 759,
755, 759, 756, 752, 759, 761, 758, 750, 750, 756, 760, 764, 761, 764, 769, 761, 764, 761, 756,
749, 754, 768, 752, 749, 757, 751, 744, 752, 756, 753, 767, 770, 770, 762, 747, 749, 750, 747,
750, 748, 744, 750, 748, 742, 740, 741, 742, 750, 757, 750, 758, 755, 755, 745, 732, 728, 726,
735, 745, 752, 747, 752, 753, 747, 756, 748, 748, 751, 753, 747, 749, 756, 760, 761, 757, 756,
759, 753, 743, 751, 749, 756, 760, 774, 770, 780, 780, 775, 769, 756, 759, 761, 767, 774, 773,
770, 768, 773, 770, 765, 771, 759, 758, 753, 747, 739, 740, 741, 744, 741, 736, 743, 731, 740,
735, 736, 738, 734, 739, 736, 731, 732, 730, 730, 733, 730, 726, 735, 745, 745, 749, 747, 747,
750, 755, 754, 747, 762, 761, 764, 773, 769, 771, 771, 767, 761, 756, 753, 746, 757, 755, 756,
766, 759, 764], [100, 107, 112, 113, 124, 131, 134, 137, 148, 160, 174, 190, 201, 216, 225,
237, 246, 253, 259, 270, 277, 287, 305, 327, 351, 375, 381, 400, 425, 431, 454, 474, 493, 505,
523, 525, 536, 547, 559, 570, 579, 578, 588, 590, 609, 611, 620, 623, 631, 634, 640, 640, 642,
641, 657, 670, 672, 678, 683, 696, 700, 710, 717, 728, 725, 720, 722, 725, 730, 722, 725, 722,
732, 727, 732, 733, 732, 733, 743, 739, 747, 737, 737, 739, 745, 749, 748, 753, 738, 739, 742,
741, 748, 753, 761, 762, 761, 763, 770, 765, 755, 751, 750, 747, 757, 760, 771, 773, 772, 769,
777, 763, 762, 757, 759, 754, 750, 752, 753, 759, 762, 767, 759, 765, 771, 762, 764, 759, 763,
770, 768, 766, 754, 745, 747, 732, 719, 728, 733, 734, 731, 739, 744, 750, 753, 760, 763, 772,
775, 760, 764, 773, 777, 773, 766, 772, 775, 777, 779, 775, 784, 783, 772, 772, 764, 762, 759,
756, 768, 764, 768, 758, 754, 756, 755, 751, 752, 753, 762, 766, 768, 769, 779, 783, 785, 783,
785, 784, 782, 787, 787, 783, 788, 787, 787, 796, 786, 783, 791, 773, 786, 786, 792, 785, 788,
791, 785, 781, 784, 773, 777, 765, 772, 779, 770, 763, 755, 765, 764, 756, 755, 755, 749, 750,
746, 744, 758, 759, 760, 770, 772, 762, 757, 754, 752, 741, 740, 747, 754, 753, 762, 765, 761,
758, 759, 759, 770, 770, 757, 756, 767, 767, 766, 763, 765, 769, 771, 783, 796, 799, 797, 803,
802, 788, 789, 789, 794, 791, 796, 795, 795, 792, 781, 780, 783, 775, 772, 769, 763, 773, 771,
773, 772, 764, 758, 759, 760, 764, 753, 763, 768, 766, 760, 757, 756, 761, 760, 760, 753, 755],
[100, 107, 113, 118, 124, 136, 140, 157, 165, 172, 182, 195, 201, 209, 214, 226, 236, 250, 256,
273, 288, 292, 306, 313, 325, 333, 347, 369, 388, 406, 423, 436, 453, 456, 472, 484, 490, 514,
524, 539, 553, 565, 580, 580, 590, 594, 603, 618, 622, 620, 635, 637, 646, 653, 654, 654, 661,
674, 679, 690, 699, 697, 694, 705, 695, 705, 707, 712, 718, 727, 728, 735, 730, 730, 729, 732,
724, 720, 727, 743, 748, 752, 759, 760, 765, 759, 752, 756, 746, 745, 732, 734, 741, 741, 747,
746, 737, 737, 733, 734, 734, 732, 743, 748, 746, 746, 752, 762, 767, 773, 775, 760, 754, 767,
766, 761, 753, 762, 768, 766, 762, 771, 775, 781, 779, 778, 785, 786, 791, 791, 792, 794, 782,
777, 780, 782, 785, 800, 803, 807, 802, 800, 800, 793, 793, 792, 788, 783, 785, 785, 791, 782,
774, 784, 792, 788, 795, 802, 791, 781, 776, 783, 783, 779, 778, 785, 787, 780, 780, 785, 792,
798, 790, 783, 783, 789, 789, 784, 770, 774, 777, 774, 777, 779, 776, 772, 764, 761, 762, 765,
767, 769, 763, 763, 757, 754, 756, 751, 745, 749, 743, 741, 752, 759, 758, 748, 747, 749, 747,
752, 756, 755, 753, 753, 743, 752, 741, 746, 743, 744, 729, 732, 735, 731, 740, 746, 742, 753,
754, 754, 756, 757, 765, 767, 763, 772, 777, 787, 797, 789, 780, 779, 770, 767, 757, 764, 767,
767, 767, 767, 767, 760, 752, 749, 751, 755, 758, 764, 760, 768, 777, 772, 768, 765, 776, 770,
769, 774, 769, 760, 764, 764, 756, 747, 756, 755, 759, 759, 770, 756, 751, 749, 756, 753, 761,
757, 768, 766, 758, 760, 778, 781, 773, 784, 791, 784, 779, 778, 775, 776], [100,
108, 114, 123, 131, 137, 145, 152, 157, 168, 175, 192, 209, 212, 220, 236, 248, 258, 264, 268,
282, 295, 300, 323, 331, 339, 355, 367, 385, 407, 414, 435, 449, 447, 470, 481, 484, 494, 509,
519, 530, 541, 551, 564, 570, 578, 587, 596, 594, 597, 613, 631, 641, 647, 656, 661, 677, 691,
700, 708, 717, 721, 722, 727, 725, 725, 726, 728, 730, 730, 726, 731, 728, 741, 734, 733, 733,
735, 716, 722, 728, 729, 730, 732, 720, 716, 710, 719, 723, 724, 730, 724, 738, 740, 743, 748,
755, 755, 758, 763, 758, 752, 755, 760, 757, 768, 770, 766, 763, 764, 755, 756, 752, 746, 750,
751, 754, 748, 755, 754, 752, 768, 759, 761, 766, 757, 767, 758, 757, 742, 750, 762, 754, 764,
760, 756, 762, 772, 778, 776, 772, 774, 771, 754, 773, 776, 773, 766, 769, 769, 770, 771, 769,
772, 770, 774, 774, 777, 782, 769, 762, 760, 760, 777, 783, 785, 789, 779, 776, 783, 791, 792,
801, 787, 781, 774, 770, 774, 773, 770, 767, 766, 761, 761, 764, 754, 749, 746, 748, 752, 750,
751, 755, 763, 756, 757, 763, 774, 773, 774, 776, 775, 777, 773, 783, 791, 780, 784, 775, 769,
774, 779, 782, 786, 792, 783, 793, 791, 778, 781, 779, 779, 778, 785, 778, 779, 773, 772, 768,
780, 777, 768, 776, 769, 776, 771, 768, 765, 766, 766, 764, 753, 752, 750, 749, 748, 749, 752,
760, 763, 749, 754, 753, 752, 749, 748, 747, 751, 742, 739, 731, 728, 728, 725, 712, 718, 716,
722, 724, 723, 736, 735, 747, 746, 746, 740, 739, 743, 742, 749, 742, 753, 752, 752, 752, 754,
764, 761, 766, 775, 773, 764, 771, 761, 762, 749, 745, 748, 754, 754], [100, 109, 120, 123,
128, 134, 138, 149, 153, 161, 173, 183, 200, 209, 216, 221, 235, 240, 247, 249, 259, 268, 285,
293, 311, 326, 360, 383, 400, 420, 434, 448, 467, 476, 488, 494, 511, 529, 542, 559, 561, 580,
592, 606, 613, 624, 641, 651, 661, 669, 670, 677, 668, 677, 677, 682, 684, 697, 692, 699, 700,
704, 704, 707, 714, 717, 720, 718, 716, 719, 718, 727, 725, 720, 730, 740, 747, 749, 754, 759,
763, 763, 763, 761, 768, 766, 762, 752, 750, 745, 750, 752, 759, 766, 764, 754, 756, 752, 766,
771, 772, 784, 786, 793, 776, 772, 774, 765, 762, 756, 755, 763, 766, 770, 774, 759, 769, 768,
768, 764, 767, 765, 755, 756, 767, 768, 762, 763, 764, 756, 757, 753, 760, 755, 774, 769, 772,
763, 763, 759, 755, 747, 756, 749, 746, 744, 752, 750, 754, 754, 763, 753, 757, 749, 758, 761,
757, 754, 745, 743, 739, 739, 745, 745, 741, 751, 740, 743, 735, 731, 737, 736, 731, 731, 725,
721, 721, 723, 735, 734, 735, 747, 755, 755, 745, 729, 737, 739, 734, 730, 737, 744, 741, 746,
742, 763, 760, 759, 769, 764, 767, 759, 757, 765, 762, 753, 760, 770, 761, 762, 763, 759, 770,
761, 759, 750, 741, 739, 739, 750, 755, 755, 757, 753, 753, 751, 753, 761, 760, 764, 765, 773,
770, 771, 765, 773, 781, 776, 769, 768, 762, 765, 760, 766, 763, 757, 750, 763, 761, 761, 764,
764, 759, 765, 762, 756, 755, 764, 750, 754, 759, 759, 755, 764, 771, 788, 779, 774, 772, 771,
779, 773, 770, 773, 780, 783, 782, 768, 768, 766, 762, 758, 758, 754, 742, 734, 740, 740, 736,
729, 745, 746, 751, 760, 763, 774, 776, 771, 774, 766], [100, 112, 117, 127, 133, 145, 158, 169,
174, 182, 190, 210, 225, 236, 243, 257, 272, 284, 298, 308, 318, 339, 353, 368, 375, 385, 405,
413, 427, 438, 447, 464, 477, 494, 501, 506, 527, 529, 537, 556, 566, 574, 592, 599, 606, 601,
612, 632, 631, 642, 651, 659, 664, 664, 670, 682, 683, 681, 677, 667, 668, 680, 698, 713, 717,
718, 720, 724, 724, 736, 734, 735, 748, 747, 755, 752, 752, 743, 746, 754, 757, 749, 750, 751,
750, 754, 758, 754, 758, 756, 749, 747, 759, 765, 767, 758, 747, 738, 749, 763, 770, 773, 755,
749, 758, 756, 750, 758, 748, 749, 750, 752, 744, 746, 751, 758, 754, 757, 758, 756, 755, 757,
761, 766, 768, 760, 758, 757, 749, 753, 761, 761, 752, 755, 750, 746, 747, 751, 755, 748, 749,
742, 732, 743, 738, 742, 750, 750, 750, 747, 752, 748, 741, 735, 746, 753, 763, 766, 765, 769,
777, 766, 766, 766, 765, 757, 747, 740, 722, 718, 723, 732, 742, 740, 747, 747, 746, 730, 731,
725, 717, 727, 726, 730, 734, 737, 728, 734, 727, 729, 731, 727, 741, 749, 754, 758, 767, 767,
768, 763, 765, 774, 776, 786, 783, 777, 776, 778, 786, 784, 787, 778, 770, 772, 780, 783, 777,
774, 765, 769, 763, 765, 766, 764, 763, 770, 770, 773, 784, 773, 768, 765, 761, 769, 760, 764,
765, 770, 761, 770, 768, 765, 760, 774, 767, 762, 764, 756, 755, 756, 759, 752, 751, 748, 753,
748, 742, 746, 741, 740, 735, 745, 750, 752, 749, 744, 744, 753, 745, 743, 747, 746, 750, 754,
753, 747, 751, 752, 753, 752, 755, 751, 759, 752, 748, 746, 751, 756, 749, 753, 753, 757, 755,
765, 767, 767, 767, 770, 768, 775]]
class ps4_calc(unittest.TestCase):
def test_calc_pop_avg(self):
avg = 762.5
calc_avg = ps4.calc_pop_avg(population, 299)
print(calc_avg)
self.assertTrue(avg-1 < calc_avg < avg+1,
"Got incorrect population average {} instead of {}.".format(calc_avg, avg))
def test_calc_pop_std(self):
std = 10.735455276791944
calc_std = ps4.calc_pop_std(population, 299)
print(calc_std)
self.assertTrue(std -0.1 < calc_std < std + 0.1,
"Got incorrect population standard deviation {} instead of {}.".format(calc_std, std))
def test_calc_95_ci(self):
ci_95 = 6.6539041171330382
calc_avg, calc_ci_95 = ps4.calc_95_ci(population, 299)
print(calc_ci_95)
self.assertTrue(ci_95 - 0.1 < calc_ci_95 < ci_95 + 0.1,
"Got incorrect population 95% CI {} instead of {}.".format(calc_ci_95, ci_95))
class ps4_classes(unittest.TestCase):
def test_simpleBacteria_is_killed(self):
b1 = ps4.SimpleBacteria(0.0, 1.0)
b2 = ps4.SimpleBacteria(1.0, 0.0)
self.assertTrue(b1.is_killed(),
'Expected SimpleBacteria(0.0, 1.0) to be killed with is_killed()')
self.assertFalse(b2.is_killed(),
'Expected SimpleBacteria(1.0, 0.0) to be survive with is_killed()')
def test_simpleBacteria_reproduce(self):
b1 = ps4.SimpleBacteria(0.0, 1.0)
b2 = ps4.SimpleBacteria(1.0, 0.0)
with self.assertRaises(ps4.NoChildException):
b1.reproduce(0)
with self.assertRaises(ps4.NoChildException):
b2.reproduce(1)
offspring_b = b2.reproduce(0)
self.assertIs(type(offspring_b), ps4.SimpleBacteria, 'offspring should be a SimpleBacteria')
self.assertEqual(offspring_b.birth_prob, 1.0)
self.assertEqual(offspring_b.death_prob, 0.0)
class test_functions(unittest.TestCase):
def test_calc_pop_avg(self):
population = [[1, 2, 3],[2, 5, 9],[6, 7, 10]]
self.assertEqual(ps4.calc_pop_avg(population, 0), 2 , 'expected 2')
self.assertEqual(ps4.calc_pop_avg(population, 1), 16/3, 'expected 5 1/3')
self.assertEqual(ps4.calc_pop_avg(population, 2), 23/3, 'expected 7 2/3')
populations = np.array([[1, 2, 3], [10, 20, 30]])
self.assertEqual(ps4.calc_pop_avg(populations, 0), 2)
self.assertEqual(ps4.calc_pop_avg(populations, 1), 20)
if __name__ == "__main__":
suite = unittest.TestSuite()
# suite.addTest(unittest.makeSuite(ps4_calc))
suite.addTest(unittest.makeSuite(ps4_classes))
suite.addTest(unittest.makeSuite(test_functions))
unittest.TextTestRunner(verbosity=3).run(suite)
``` |
{
"source": "johnlk/Multi_Perceptron",
"score": 3
} |
#### File: johnlk/Multi_Perceptron/net.py
```python
from numpy import array, dot, exp, random
import mnist
# favorite number
random.seed(58)
def sigmoid(x):
return 1 / (1 + exp(-x))
def arc_sigmoid(x):
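    # Despite its name, this is not the inverse sigmoid: it is the derivative of
    # the sigmoid written in terms of the sigmoid's output s, d(sigmoid)/dz = s*(1 - s),
    # which is exactly the factor the backpropagation step in NNet.train() needs.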
return x * (1 - x)
def compute_loss(error_vector):
loss = 0.
for i in range(len(error_vector)):
loss += sum([element * element for element in error_vector[i] ])
return loss
class Layer():
def __init__(self, num_nodes, num_inputs):
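        # initialize weights uniformly in [-1, 1): random.random() is uniform on [0, 1)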
self.weights = 2 * random.random((num_inputs, num_nodes)) - 1
def print_weights(self):
print(self.weights)
class NNet():
def __init__(self, layer1, layer2, layer3):
self.layer1 = layer1
self.layer2 = layer2
self.layer3 = layer3
self.learning_rate = 0.0001
def train(self, input_data, labels, num_epochs):
for epoch in range(num_epochs):
layer1_output, layer2_output, layer3_output = self.generate_layer_output(input_data)
            # backpropagation steps
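            # Each layer's delta is its error times the sigmoid derivative of its
            # output; the error is pushed backwards through the transposed weight
            # matrices, and the updates below are dot(layer_input.T, delta) scaled
            # by the learning rate (plain gradient descent on a squared-error loss).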
layer3_error = labels - layer3_output
layer3_delta = layer3_error * arc_sigmoid(layer3_output)
layer2_error = dot(layer3_delta, self.layer3.weights.T)
layer2_delta = layer2_error * arc_sigmoid(layer2_output)
layer1_error = dot(layer2_delta, self.layer2.weights.T)
layer1_delta = layer1_error * arc_sigmoid(layer1_output)
# update weights after calculating the deltas
self.layer1.weights += dot(input_data.T, layer1_delta) * self.learning_rate
self.layer2.weights += dot(layer1_output.T, layer2_delta) * self.learning_rate
self.layer3.weights += dot(layer2_output.T, layer3_delta) * self.learning_rate
# layer3 is also the output layer
if epoch % 100 == 0:
print("Epoch #: ", epoch, " loss: ", compute_loss(layer3_error))
def generate_layer_output(self, input_data):
layer1_output = sigmoid(dot(input_data, self.layer1.weights))
layer2_output = sigmoid(dot(layer1_output, self.layer2.weights))
layer3_output = sigmoid(dot(layer2_output, self.layer3.weights))
return layer1_output, layer2_output, layer3_output
def predict(self, input_data):
last_layer_output = self.generate_layer_output(input_data)[2]
predictions = []
for row in last_layer_output:
max_index = 0
max_value = 0.
for index in range(10):
if row[index] > max_value:
max_index = index
max_value = row[index]
predictions.append(max_index)
return array(predictions)
def print_net(self):
print("Layer 1 weights:")
self.layer1.print_weights()
print("Layer 2 weights:")
self.layer2.print_weights()
print("Layer 3 weights:")
self.layer3.print_weights()
# 16 nodes with 784 inputs
layer1 = Layer(16, 784)
# 16 nodes with 16 inputs
layer2 = Layer(16, 16)
# 10 nodes with 16 inputs
layer3 = Layer(10, 16)
perceptron = NNet(layer1, layer2, layer3)
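# Assumption: mnist.load() from the local mnist helper is expected to return
# flattened images of shape (N, 784) and one-hot labels of shape (N, 10); the
# element-wise `labels - layer3_output` in NNet.train() only works if the labels
# are one-hot.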
x_train, y_train, x_test, y_test = mnist.load()
# train for 1.5k epochs
perceptron.train(x_train, y_train, 1500)
expected_y = perceptron.predict(x_test)
# calculating accuracy
correct = 0
for i in range(len(y_test)):
if y_test[i] == expected_y[i]:
correct += 1
accuracy = correct / len(y_test)
print("Accuracy: ", accuracy)
``` |
{
"source": "johnlk/raspberry_pi_led_wall",
"score": 3
} |
#### File: johnlk/raspberry_pi_led_wall/apis.py
```python
import requests
from key import get_pubnub_obj, get_weather_key
from helpers import kelvin_to_far
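# kelvin_to_far (defined in helpers.py, not shown here) is assumed to convert the
# Kelvin temperature returned by OpenWeatherMap into a Fahrenheit string, roughly
# (K - 273.15) * 9 / 5 + 32; it is concatenated directly into the message below.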
def get_weather():
weather = ""
try:
response = requests.get("http://api.openweathermap.org/data/2.5/weather?zip=47905,us&appid=" + get_weather_key())
response = response.json()
weather = "weather: " + kelvin_to_far(response['main']['temp']) + " degrees, "
        for forecast in response['weather']:
            weather += forecast['description'] + ", "
weather += "wind " + str(response['wind']['speed']) + " mph"
except Exception as err:
print(err)
print("404 weather")
weather = "404 weather"
return weather
def get_chuck_norris_joke():
joke = ""
try:
response = requests.get("https://api.chucknorris.io/jokes/random")
response = response.json()
joke = "Chuck norris joke: " + response['value']
except Exception as err:
print(err)
print("bad chuck norris")
joke = "404 chuck"
return joke
def get_useless_fact():
fact = ""
try:
response = requests.get("https://uselessfacts.jsph.pl//random.json?language=en")
response = response.json()
fact = "useless fact: " + response['text']
except Exception as err:
print(err)
print("404 useless fact")
fact = "404 useless fact"
return fact
def get_trivia():
question = ""
try:
response = requests.get("https://opentdb.com/api.php?amount=1&difficulty=easy&type=multiple")
response = response.json()
response = response['results'][0]
question = "trivia question: " + response['question']
question += " "
question += response['correct_answer']
except Exception as err:
print(err)
print("404 trivia api")
question = "404 trivia"
return question
``` |
{
"source": "johnllopez616/cmsi-386",
"score": 3
} |
#### File: homework2/tests/warmup_test.py
```python
import re
import math
import pytest
from warmup import (change, strip_quotes, say,
interleave, random_name)
def test_change():
assert change(0) == (0, 0, 0, 0)
assert change(97) == (3, 2, 0, 2)
assert change(8) == (0, 0, 1, 3)
assert change(250) == (10, 0, 0, 0)
assert change(144) == (5, 1, 1, 4)
assert change(97) == (3, 2, 0, 2)
assert change(100000000000) == (4000000000, 0, 0, 0)
with pytest.raises(ValueError) as excinfo:
change(-50)
assert str(excinfo.value) == 'amount cannot be negative'
def test_strip_quotes():
assert strip_quotes('') == ''
assert strip_quotes('Hello, world') == 'Hello, world'
assert strip_quotes('"\'') == ''
assert strip_quotes('a"""\'\'"z') == 'az'
# def test_scramble():
# for s in ['a', 'rat', 'JavaScript testing', '', 'zzz', '^*&^*&^▱ÄÈËɡɳɷ']:
# assert sorted(s) == sorted(scramble(s))
# possibilities = set(['ABC', 'ACB', 'BAC', 'BCA', 'CAB', 'CBA'])
# for _ in range(200):
# possibilities.discard(scramble('ABC'))
# assert not possibilities
def test_say():
assert say() == ''
assert say('hi')() == 'hi'
assert say('hi')('there')() == 'hi there'
assert say('hello')('my')('name')('is')('Colette')() == 'hello my name is Colette'
# def test_triples():
# assert triples(0) == []
# assert triples(5) == [(3, 4, 5)]
# assert set(triples(40)) == set([(3, 4, 5), (5, 12, 13), (6, 8, 10), (7, 24, 25), (8, 15, 17),
# (9, 12, 15), (10, 24, 26), (12, 16, 20), (12, 35, 37),
# (15, 20, 25), (15, 36, 39), (16, 30, 34), (18, 24, 30),
# (20, 21, 29), (21, 28, 35), (24, 32, 40)])
#
# def test_powers():
# p = powers(2, 10)
# assert next(p) == 1
# assert next(p) == 2
# assert next(p) == 4
# assert next(p) == 8
# with pytest.raises(StopIteration):
# next(p)
# assert list(powers(2, -5)) == []
# assert list(powers(7, 0)) == []
# assert list(powers(3, 1)) == [1]
# assert list(powers(2, 63)) == [1, 2, 4, 8, 16, 32]
# assert list(powers(2, 64)) == [1, 2, 4, 8, 16, 32, 64]
#
def test_interleave():
assert interleave([]) == []
assert interleave([1, 4, 6]) == [1, 4, 6]
assert interleave([], 2, 3) == [2, 3]
assert interleave([1], 9) == [1, 9]
assert interleave([8, 8, 3, 9], 1) == [8, 1, 8, 3, 9]
assert interleave([2], 7, '8', {}) == [2, 7, '8', {}]
a = [1, 2, 3, 4]
assert interleave(a, 10, 20, 30) == [1, 10, 2, 20, 3, 30, 4]
# Test input list not destroyed
assert a == [1, 2, 3, 4]
# def test_cylinder():
# c = Cylinder(radius=10, height=5)
# assert c.height == 5
# assert c.radius == 10
# c = Cylinder(height=5)
# assert c.height == 5
# assert c.radius == 1
# c = Cylinder(radius=5)
# assert c.height == 1
# assert c.radius == 5
# c = Cylinder()
# assert c.height == 1
# assert c.radius == 1
# c = Cylinder(radius=2, height=10)
# assert pytest.approx(c.volume, 0.000001) == 40 * math.pi
# assert pytest.approx(c.surface_area, 0.000001) == 48 * math.pi
# c.widen(3)
# assert c.radius == 6
# c.stretch(2)
# assert c.height == 20
# assert pytest.approx(c.surface_area, 0.000001) == 312 * math.pi
# assert pytest.approx(c.volume, 0.000001) == 720 * math.pi
#
# def test_crypto():
# assert isinstance(make_crypto_functions('zombie devops feynman123', '0000000000000000'), tuple)
# e, d = make_crypto_functions('zombie devops feynman123', '0000000000000000')
# assert e(b'Hello......world') == b'\x15\x8a\xa5a\xd8\x07\\d(e\xc9\xbes*\x13\x9f'
# assert d(b'\x15\x8a\xa5a\xd8\x07\\d(e\xc9\xbes*\x13\x9f') == b'Hello......world'
# for s in [b'', b'\xfe9iP\x05\x22\x490opXZ@1##']:
# assert d(e(s)) == s
def test_random_name():
p = random_name(gender='female', region='canada')
assert isinstance(p, str)
assert len(p) > 3
assert ', ' in p
with pytest.raises(ValueError) as excinfo:
random_name(gender='fjweiuw', region='canada')
assert re.match(r'{"error":\s*"Invalid gender"}', str(excinfo.value))
``` |
{
"source": "JohnlNguyen/Comment2Code",
"score": 2
} |
#### File: Comment2Code/Code/code_data_reader.py
```python
import tensorflow as tf
import tensorflow_datasets as tfds
from collections import namedtuple
from pdb import set_trace
import yaml
import numpy as np
import os
import sys
import re
import random
import json
random.seed(42)
config = yaml.safe_load(open("config.yml"))
class CodeDataReader(object):
CodeBatch = namedtuple('CodeBatch', 'b_code a_code comment label')
def __init__(self, data_config, data_root, test_file=None, vocab_file=None):
self.config = data_config
self.train_data, self.valid_data, self.test_data = self.read(
data_root, test_file)
self.filter_max_length()
print("%d lines" % len(self.train_data))
self.get_vocab(vocab_file)
# Limit held-out data size
if sum(len(l) for l in self.valid_data) > 1000000:
random.shuffle(self.valid_data)
self.valid_data = self.valid_data[:250]
self.sample_len = lambda l: len(l[0]) + len(l[1])
def filter_max_length(self):
def is_long(x): return len(x['before_comment']) + len(x['before_code']) + len(
x['after_code']) > config['data']['max_sample_size']
self.train_data = [row for row in self.train_data if not is_long(row)]
self.valid_data = [row for row in self.valid_data if not is_long(row)]
# self.test_data = [row for row in self.test_data if not is_long(row)]
def get_vocab(self, vocab_path):
if not vocab_path:
self.vocabulary = tfds.features.text.SubwordTextEncoder.build_from_corpus(
self.generator(), target_vocab_size=2 ** 13)
self.vocabulary.save_to_file('./data/vocab_code_java')
else:
self.vocabulary = tfds.features.text.SubwordTextEncoder.load_from_file(
vocab_path)
def generator(self):
for line in self.train_data + self.valid_data:
yield line['before_comment']
yield line['after_comment']
yield self.clean_code(line['before_code'])
yield self.clean_code(line['after_code'])
def read(self, data_root, test_file=None):
with open(data_root, encoding='utf-8', errors='ignore') as f:
data = json.load(f)
data = [row for row in data if row['type']
== "BOTH" or row['type'] == "CODE"]
test_data = []
if test_file is not None:
with open(test_file, encoding='utf-8', errors='ignore') as f:
test_data = json.load(f)
# subset data
percent = float(self.config['percent'])
data = data[:int(len(data) * percent)]
train_data = data[:int(0.95 * len(data))]
valid_data = data[int(0.95 * len(data)):]
return train_data, valid_data, test_data
def batcher(self, mode="training"):
# b_indices, b_masks, a_indices, a_masks, c_indices, c_masks, label
ds = tf.data.Dataset.from_generator(self.batch_generator, output_types=(
tf.int32, tf.float32, tf.int32, tf.float32, tf.int32, tf.float32, tf.float32),
args=(mode,))
ds = ds.prefetch(buffer_size=1)
return ds
def make_batch(self, buffer):
def sample_len(x): return len(x.b_code) + \
len(x.a_code) + len(x.comment)
buffer = self.sort_buffer(buffer, sample_len)
batch = [[], [], [], []]
max_seq_len = 0
for ix, seq in enumerate(buffer):
max_seq_len = max(max_seq_len, sample_len(seq))
if (len(batch[0]) > 0 and len(batch[1]) > 0 and len(
batch[2]) > 0 and len(batch[3]) > 0) and max_seq_len * (len(batch[0]) + 1) > config['data']['max_batch_size']:
break
batch[0].append(seq.b_code)
batch[1].append(seq.a_code)
batch[2].append(seq.comment)
batch[3].append(seq.label)
assert len(batch[0]) > 0 and len(batch[1]) > 0 and len(
batch[2]) > 0 and len(batch[3]) > 0
b_code_indices, b_code_masks = self.gen_tensor(batch[0], dtype='int32')
a_code_indices, a_code_masks = self.gen_tensor(batch[1], dtype='int32')
comment_indices, comment_masks = self.gen_tensor(
batch[2], dtype='int32')
label = tf.constant(batch[3])
buffer = buffer[len(batch[0]):]
batch = (
b_code_indices, b_code_masks, a_code_indices, a_code_masks, comment_indices,
comment_masks, label
)
return buffer, batch
def batch_generator(self, mode="training"):
batch_data = self.setup_batch_gen(mode)
buffer = []
for line in batch_data:
assert line['type'] == "BOTH" or line['type'] == "CODE"
label = 1 if line['type'] == 'BOTH' else 0
b_code, a_code = line['before_code'], line['after_code']
comment = line['before_comment']
b_code = self.clean_code(b_code)
a_code = self.clean_code(a_code)
if len(comment) + len(b_code) + len(a_code) > config['data']['max_sample_size']:
continue
b_code = self.vocabulary.encode(b_code)
a_code = self.vocabulary.encode(a_code)
comment = self.vocabulary.encode(comment)
buffer.append(CodeDataReader.CodeBatch(
b_code, a_code, comment, label))
if len(buffer) > 0 and sum(len(l.b_code) + len(l.a_code) + len(l.comment) for l in buffer) > config['data'][
'max_batch_size']:
buffer, batch = self.make_batch(buffer)
yield batch
while buffer:
buffer, batch = self.make_batch(buffer)
if not batch:
break
yield batch
def clean_code(self, code):
return "\\n".join(code).replace("\n", "\\n")
def gen_tensor(self, data, dtype='int32'):
return tf.ragged.constant(data, dtype=dtype).to_tensor(), tf.sequence_mask([len(l) for l in data],
dtype=tf.dtypes.float32)
def sort_buffer(self, buffer, sample_len):
pivot = sample_len(random.choice(buffer))
return sorted(buffer, key=lambda b: abs(sample_len(b) - pivot))
def setup_batch_gen(self, mode):
if isinstance(mode, bytes):
mode = mode.decode("utf-8")
if mode == "training":
# batch_data = self.train_data
both = [row for row in self.train_data if row["type"] == "BOTH"]
code = [row for row in self.train_data if row["type"] == "CODE"]
batch_data = both + random.sample(code, len(both))
random.shuffle(batch_data)
elif mode == "valid":
both = [row for row in self.valid_data if row["type"] == "BOTH"]
code = [row for row in self.valid_data if row["type"] == "CODE"]
batch_data = both + random.sample(code, len(both))
elif mode == "test":
batch_data = [{**row, **{
'type': "BOTH" if row['label'] == "1" else "CODE"}
}
for row in self.test_data
]
return batch_data
```
#### File: Comment2Code/Code/data_reader.py
```python
import numpy as np
import os
import sys
import re
import random
import json
random.seed(42)
import yaml
config = yaml.safe_load(open("config.yml"))
from vocabulary import VocabularyBuilder, BPE, SubwordTextEncoder
from pdb import set_trace
from collections import namedtuple
from util import get_data
import tensorflow as tf
class DataReader(object):
BothBatch = namedtuple('BothBatch', 'b_tokens a_tokens code_tokens label')
Batch = namedtuple('Batch', 'comment_tokens code_tokens label')
AllBatch = namedtuple('AllBatch', 'b_com a_com a_cod b_cod label num_tokens')
CodeBatch = namedtuple('CodeBatch', 'b_code a_code comment label')
def __init__(self, data_config, vocab_config, data_root, vocab_file):
self.config = data_config
self.train_data, self.valid_data, self.test_data = self.read(data_root)
print("%d lines" % len(self.train_data))
self.get_vocab(vocab_config, vocab_file)
# Limit held-out data size
if sum(len(l) for l in self.valid_data) > 1000000:
random.shuffle(self.valid_data)
self.valid_data = self.valid_data[:250]
self.sample_len = lambda l: len(l[0]) + len(l[1])
# stats for data
self.long_sample_count = 0
def get_vocab(self, vocab_config, vocab_file):
if vocab_config["tokenizer"] == "bpe":
self.vocabulary = BPE(vocab_config, vocab_path=vocab_file)
else:
self.vocabulary = SubwordTextEncoder(vocab_config, vocab_path=vocab_file)
def train_tokens(self):
yield from get_data(self.train_data)
def read(self, data_root):
if os.path.isdir(data_root):
data = []
for file in os.listdir(data_root):
with open(os.path.join(data_root, file), encoding='utf-8', errors='ignore') as f:
data.append(json.load(f))
train_data = [k for l in data[:int(0.9 * len(data))] for k in l]
valid_data = [k for l in data[int(0.9 * len(data)):int(0.95 * len(data))] for k in l]
            test_data = [k for l in data[int(0.95 * len(data)):] for k in l]
return train_data, valid_data, test_data
else:
with open(data_root, encoding='utf-8', errors='ignore') as f:
data = json.load(f)
# subset data
percent = float(self.config['percent'])
data = data[:int(len(data) * percent)]
            # keep only BOTH changes, and ensure all fields are non-empty
data = [x for x in data if x['type'] == "BOTH" and all(x.values())]
# self.get_project_idx(data)
# self.get_file_idx(data)
train_data = data[:int(0.95 * len(data))]
valid_data = data[int(0.95 * len(data)):]
test_data = []
return train_data, valid_data, test_data
def batcher(self, mode="training", input_type="both"):
if input_type == "all":
ds = tf.data.Dataset.from_generator(self.batch_generator, output_types=(
tf.int32, tf.float32, tf.int32, tf.float32, tf.int32, tf.float32, tf.int32, tf.float32, tf.float32),
args=(mode, input_type,))
elif input_type == "code":
# b_indices, b_masks, a_indices, a_masks, c_indices, c_masks, leak_indices, leak_masks, label
ds = tf.data.Dataset.from_generator(self.batch_generator, output_types=(
tf.int32, tf.float32, tf.int32, tf.float32, tf.int32, tf.float32, tf.float32),
args=(mode, input_type,))
elif input_type == "both":
# b_indices, b_masks, a_indices, a_masks, c_indices, c_masks, label
ds = tf.data.Dataset.from_generator(self.batch_generator, output_types=(
tf.int32, tf.float32, tf.int32, tf.float32, tf.int32, tf.float32, tf.float32), args=(mode, input_type,))
else:
# comment_indices, comment_masks, code_indices, code_masks, label
ds = tf.data.Dataset.from_generator(self.batch_generator, output_types=(
tf.int32, tf.float32, tf.int32, tf.float32, tf.float32), args=(mode, input_type,))
ds = ds.prefetch(buffer_size=1)
return ds
def stats(self):
print("Long Seq {}".format(self.long_sample_count))
print("Num OOV {}".format(self.vocabulary.num_oov))
def make_batch(self, buffer, input_type="both"):
if input_type == "code":
sample_len = lambda x: len(x.b_code) + len(x.a_code) + len(x.comment)
buffer = self.sort_buffer(buffer, sample_len)
batch = [[], [], [], []]
max_seq_len = 0
for ix, seq in enumerate(buffer):
max_seq_len = max(max_seq_len, sample_len(seq))
if max_seq_len * (len(batch[0]) + 1) > config['data']['max_batch_size']:
break
batch[0].append(seq.b_code)
batch[1].append(seq.a_code)
batch[2].append(seq.comment)
batch[3].append(seq.label)
b_code_indices, b_code_masks = self.gen_tensor(batch[0], dtype='int32')
a_code_indices, a_code_masks = self.gen_tensor(batch[1], dtype='int32')
comment_indices, comment_masks = self.gen_tensor(batch[2], dtype='int32')
label = tf.constant(batch[3])
buffer = buffer[len(batch[0]):]
batch = (
b_code_indices, b_code_masks, a_code_indices, a_code_masks, comment_indices,
comment_masks, label
)
return buffer, batch
elif input_type == "both":
sample_len = lambda x: len(x.b_tokens) + len(x.a_tokens) + len(x.code_tokens)
buffer = self.sort_buffer(buffer, sample_len)
batch = [[], [], [], []]
max_seq_len = 0
for ix, seq in enumerate(buffer):
max_seq_len = max(max_seq_len, sample_len(seq))
if max_seq_len * (len(batch[0]) + 1) > config['data']['max_batch_size']:
break
batch[0].append([self.vocabulary.vocab_key(s) for s in seq.b_tokens])
batch[1].append([self.vocabulary.vocab_key(s) for s in seq.a_tokens])
batch[2].append([self.vocabulary.vocab_key(s) for s in seq.code_tokens])
batch[3].append(seq.label)
b_comment_indices, b_comment_masks = self.gen_tensor(batch[0])
a_comment_indices, a_comment_masks = self.gen_tensor(batch[1])
code_indices, code_masks = self.gen_tensor(batch[2])
label = tf.constant(batch[3])
buffer = buffer[len(batch[0]):]
batch = (b_comment_indices, b_comment_masks, a_comment_indices, a_comment_masks, code_indices,
code_masks, label)
return buffer, batch
else:
sample_len = lambda x: len(x.comment_tokens) + len(x.code_tokens)
buffer = self.sort_buffer(buffer, sample_len)
max_seq_len = 0
batch = [[], [], []]
for ix, seq in enumerate(buffer):
max_seq_len = max(max_seq_len, len(seq.comment_tokens) + len(seq.code_tokens))
if max_seq_len * (len(batch[0]) + 1) > config['data']['max_batch_size']:
break
batch[0].append(seq.comment_tokens)
batch[1].append(seq.code_tokens)
batch[2].append(seq.label)
comment_indices, comment_masks = self.gen_tensor(batch[0])
code_indices, code_masks = self.gen_tensor(batch[1])
label = tf.constant(batch[2])
buffer = buffer[len(batch[0]):]
batch = (comment_indices, comment_masks, code_indices, code_masks, label)
return buffer, batch
def batch_generator(self, mode="training", input_type="both"):
batch_data, input_type = self.setup_batch_gen(input_type, mode)
buffer = []
for line in batch_data:
label = round(random.random())
if int(line['after_line']) < 10 or int(line['before_line']) < 10: continue
if input_type == "code":
b_code, a_code = line['before_code'], line['after_code']
comment = line['before_comment'] if label == 0 else line['after_comment']
if label == 0:
assert comment == line['before_comment']
b_code = self.clean_code(b_code)
a_code = self.clean_code(a_code)
comment = self.clean_comment(comment)
if len(comment) + min(len(b_code), len(a_code)) > config['data']['max_sample_size']:
self.long_sample_count += 1
continue
b_code = self.vocabulary.transform(b_code)
a_code = self.vocabulary.transform(a_code)
comment = self.vocabulary.transform(comment)
buffer.append(DataReader.CodeBatch(b_code, a_code, comment, label))
if sum(len(l.b_code) + len(l.a_code) + len(l.comment) for l in buffer) > 75 * config['data'][
'max_batch_size']:
buffer, batch = self.make_batch(buffer, input_type)
yield batch
# using both before and after comment
elif input_type == "both":
item = self.gen_both_batch(line, label)
if len(item.code_tokens) + min(len(item.b_tokens), len(item.a_tokens)) > config['data'][
'max_sample_size']:
self.long_sample_count += 1
continue
buffer.append(item)
if sum(len(l.b_tokens) + len(l.a_tokens) + len(l.code_tokens) for l in buffer) > 75 * config['data'][
'max_batch_size']:
buffer, batch = self.make_batch(buffer, input_type)
yield batch
# swap with other a random point
elif input_type == "random":
if label == 0:
comment_k, code_k = 'after_comment', 'before_code'
swap = random.choice(self.train_data)
comment, code = line[comment_k], swap[code_k]
else:
comment_k, code_k = ('after_comment', 'after_code')
comment, code = line[comment_k], line[code_k]
# comment_tokens, code_tokens = self.tokenize(comment, code)
comment_tokens = self.vocabulary.transform(comment)
code_tokens = self.vocabulary.transform(code)
if len(code_tokens) + len(comment_tokens) > config['data']['max_sample_size']:
self.long_sample_count += 1
continue
buffer.append(DataReader.Batch(comment_tokens, code_tokens, label))
if sum(len(l.comment_tokens) + len(l.code_tokens) for l in buffer) > 50 * config['data'][
'max_batch_size']:
buffer, batch = self.make_batch(buffer, input_type)
yield batch
# swap before with after comment
else:
if label == 0:
comment_k, code_k = 'after_comment', 'before_code'
comment, code = line[comment_k], line[code_k]
else:
comment_k, code_k = ('after_comment', 'after_code')
comment, code = line[comment_k], line[code_k]
comment_tokens, code_tokens = self.tokenize(comment, code)
if len(code_tokens) + len(comment_tokens) > config['data']['max_sample_size']:
self.long_sample_count += 1
continue
buffer.append(DataReader.Batch(comment_tokens, code_tokens, label))
if sum(len(l.comment_tokens) + len(l.code_tokens) for l in buffer) > 50 * config['data'][
'max_batch_size']:
buffer, batch = self.make_batch(buffer, input_type)
yield batch
while buffer:
buffer, batch = self.make_batch(buffer, input_type)
if not batch:
break
yield batch
def gen_both_batch(self, line, label):
b_comment, a_comment = line['before_comment'], line['after_comment']
if label == 0:
# swap_line = random.choice(self.train_data) # TODO: need to take this out
# code = swap_line['before_code']
code = line['before_code']
else:
code = line['after_code']
b_comment = self.clean_comment(b_comment)
a_comment = self.clean_comment(a_comment)
code = self.clean_code(code)
b_comment_tokens = self.vocabulary.tokenize(b_comment)
a_comment_tokens = self.vocabulary.tokenize(a_comment)
code_tokens = self.vocabulary.tokenize(code)
return DataReader.BothBatch(b_comment_tokens, a_comment_tokens, code_tokens, label)
def get_project_idx(self, data):
self.project_lines = {}
for ix, row in enumerate(data):
project_id = self.build_project_id(row)
if project_id not in self.project_lines:
self.project_lines[project_id] = [row]
else:
self.project_lines[project_id].append(row)
def get_file_idx(self, data):
self.file_lines = {}
for ix, row in enumerate(data):
project_id = row['after_path']
if project_id not in self.file_lines:
self.file_lines[project_id] = [row]
else:
self.file_lines[project_id].append(row)
def build_project_id(self, row):
org, project, commit, file = row['after_path'].split("#")
project_id = "#".join([org, project])
return project_id
def tokenize(self, comment, code):
comment = self.clean_comment(comment)
code = self.clean_code(code)
comment_tokens = self.vocabulary.tokenize(comment)
code_tokens = self.vocabulary.tokenize(code)
return comment_tokens, code_tokens
def clean_code(self, code):
return "\\n".join(code).replace("\n", "\\n")
def clean_comment(self, comment):
return comment.replace("\n", "\\n")
def gen_tensor(self, data, dtype='int32'):
return tf.ragged.constant(data, dtype=dtype).to_tensor(), tf.sequence_mask([len(l) for l in data],
dtype=tf.dtypes.float32)
def sort_buffer(self, buffer, sample_len):
pivot = sample_len(random.choice(buffer))
return sorted(buffer, key=lambda b: abs(sample_len(b) - pivot))
@classmethod
def swap_keys(cls, label, line):
if label == 0:
if line["type"] == "BOTH":
swap_dir = round(random.random())
comment, code = ("before_comment", "after_code") if swap_dir == 0 else (
"after_comment", "before_code")
else:
comment, code = "before_comment", "after_code"
else:
if line["type"] == "BOTH":
swap_dir = round(random.random())
comment, code = ("before_comment", "before_code") if swap_dir == 0 else (
"after_comment", "after_code")
else:
comment, code = "after_comment", "after_code"
return comment, code
def setup_batch_gen(self, input_type, mode):
if isinstance(mode, bytes):
mode = mode.decode("utf-8")
if isinstance(input_type, bytes):
input_type = input_type.decode("utf-8")
if mode == "training":
batch_data = self.train_data
random.shuffle(batch_data)
elif mode == "valid":
batch_data = self.valid_data
else:
batch_data = self.test_data
return batch_data, input_type
class SimilarityDataReader(DataReader):
def __init__(self, data_config, vocab_config, data_root, vocab_file):
super(SimilarityDataReader, self).__init__(data_config, vocab_config, data_root, vocab_file)
def make_batch(self, buffer, input_type="all"):
assert str(input_type) == "all"
sample_len = lambda x: sum([len(x.b_com), len(x.a_com), len(x.b_cod), len(x.a_cod)])
buffer = self.sort_buffer(buffer, sample_len)
max_seq_len = 0
batch = [[], [], [], [], []]
for ix, seq in enumerate(buffer):
max_seq_len = max(max_seq_len, sample_len(seq))
if max_seq_len * (len(batch[0]) + 1) > config['data']['max_batch_size']:
break
batch[0].append(seq.b_com)
batch[1].append(seq.a_com)
batch[2].append(seq.b_cod)
batch[3].append(seq.a_cod)
batch[4].append(seq.label)
b_com_idx, b_com_masks = self.gen_tensor(batch[0], dtype='int32')
a_com_idx, a_com_masks = self.gen_tensor(batch[1], dtype='int32')
b_cod_idx, b_cod_masks = self.gen_tensor(batch[2], dtype='int32')
a_cod_idx, a_cod_masks = self.gen_tensor(batch[3], dtype='int32')
label = tf.constant(batch[4], shape=(len(batch[4]) * 4), dtype='float32')
batch = (
b_com_idx, b_com_masks, a_com_idx, a_com_masks, b_cod_idx, b_cod_masks, a_cod_idx, a_cod_masks, label
)
buffer = buffer[len(batch[0]):]
return buffer, batch
def batch_generator(self, mode="training", input_type="all"):
batch_data, input_type = self.setup_batch_gen(input_type, mode)
assert input_type == "all"
buffer = []
for line in batch_data:
label = round(random.random())
if int(line['after_line']) < 10 or int(line['before_line']) < 10:
continue
b_com, a_com = self.clean_comment(line['before_comment']), self.clean_comment(line['after_comment'])
b_cod, a_cod = self.clean_code(line['before_code']), self.clean_code(line['after_code'])
b_com, a_com = self.vocabulary.transform(b_com), self.vocabulary.transform(a_com)
            b_cod, a_cod = self.vocabulary.transform(b_cod), self.vocabulary.transform(a_cod)
num_tokens = [len(b_com), len(a_com), len(b_cod), len(a_cod)]
if max(num_tokens) > config['data']['max_sample_size']:
self.long_sample_count += 1
continue
# swap direction
labels = [1, 0, 0, 1] # bb ba ab aa
if label == 0:
b_com, a_com = a_com, b_com
labels = [0, 1, 1, 0] # ab aa bb ba
# swap_code = round(random.random())
# if swap_code == 0:
# # swap before comment with after comment
# b_com, a_com = a_com, b_com
# labels = [0, 1, 1, 0] # ab aa bb ba
# else:
# b_cod, a_cod = a_cod, b_cod
# labels = [0, 1, 1, 0] # ba bb aa ab
buffer.append(DataReader.AllBatch(b_com, a_com, b_cod, a_cod, label=labels,
num_tokens=sum(num_tokens)))
if sum([x.num_tokens for x in buffer]) > 5 * config['data'][
'max_batch_size']:
buffer, batch = self.make_batch(buffer, input_type)
yield batch
while buffer:
buffer, batch = self.make_batch(buffer, input_type)
if not batch:
break
yield batch
def random_swap(self, label, line):
labels = [1, 0, 0, 1] # bb ba ab aa
if label == 0:
swap_line = random.choice(self.train_data)
line['before_comment'], line['after_comment'] = swap_line['before_comment'], swap_line['after_comment']
swap_code = round(random.random())
if swap_code == 0:
line['after_code'] = swap_line['after_code']
labels = [0, 0, 0, 1]
else:
line['before_code'] = swap_line['before_code']
labels = [1, 0, 0, 0]
return line, labels
def swap_within_project(self, label, line):
labels = [1, 0, 0, 1] # bb ba ab aa
if label == 0:
proj_id = self.build_project_id(line)
swap_line = random.choice(self.project_lines[proj_id])
line['before_comment'], line['after_comment'] = swap_line['before_comment'], swap_line['after_comment']
swap_code = round(random.random())
if swap_code == 0:
line['after_code'] = swap_line['after_code']
labels = [0, 0, 0, 1]
else:
line['before_code'] = swap_line['before_code']
labels = [1, 0, 0, 0]
return line, labels
```
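The `make_batch` methods above pack variable-length samples under a token budget (max padded sequence length times batch count) rather than a fixed batch count, after sorting the buffer around a random pivot so neighbouring samples have similar lengths. A self-contained toy version of that packing logic is sketched below; the budget value and the sample data are made up for illustration and are not read from any config.

```python
# Toy sketch of the length-bucketing batch packing used by make_batch above.
# MAX_BATCH_SIZE and the sample data are illustrative only.
import random

MAX_BATCH_SIZE = 50  # budget: max_seq_len * (batch size) must stay under this


def make_toy_batch(buffer, sample_len):
    # Sort around a random pivot so samples of similar length end up adjacent.
    pivot = sample_len(random.choice(buffer))
    buffer = sorted(buffer, key=lambda s: abs(sample_len(s) - pivot))
    batch, max_seq_len = [], 0
    for seq in buffer:
        max_seq_len = max(max_seq_len, sample_len(seq))
        # Adding one more sample would exceed the padded-token budget: stop here.
        if max_seq_len * (len(batch) + 1) > MAX_BATCH_SIZE:
            break
        batch.append(seq)
    # Return the unconsumed tail of the buffer plus the packed batch.
    return buffer[len(batch):], batch


samples = [[0] * n for n in (3, 9, 4, 8, 5, 7)]
rest, batch = make_toy_batch(samples, len)
print(len(batch), "samples packed,", len(rest), "left in the buffer")
```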
#### File: Comment2Code/Code/log_saver.py
```python
import os
import tensorflow as tf
class LogSaver:
    def __init__(self, logs_path, model_name, dataset_name, mode):
        if not os.path.isdir(logs_path):
            os.makedirs(logs_path)
        self.train_writer = tf.summary.create_file_writer(
            '{}/{}/{}/{}/train/'.format(logs_path, dataset_name, model_name, mode))
        self.valid_writer = tf.summary.create_file_writer(
            '{}/{}/{}/{}/valid/'.format(logs_path, dataset_name, model_name, mode))
def log_train(self, loss, acc, bce, global_step):
with self.train_writer.as_default():
tf.summary.scalar('loss', loss, step=global_step)
tf.summary.scalar('acc', acc, step=global_step)
tf.summary.scalar('entropy', bce, step=global_step)
def log_valid(self, acc, bce, global_step):
with self.valid_writer.as_default():
tf.summary.scalar('acc', acc, step=global_step)
tf.summary.scalar('entropy', bce, step=global_step)
```
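A hypothetical usage sketch of `LogSaver`: the log directory, model/dataset names, and metric values below are made up, and the file is assumed to be importable as the `log_saver` module.

```python
# Minimal usage sketch for LogSaver; paths, names, and metric values are invented.
from log_saver import LogSaver  # assumes this file is importable as log_saver

logger = LogSaver("logs", "transformer", "my_dataset", "both")
for step in range(1, 4):
    loss = 1.0 / step
    logger.log_train(loss=loss, acc=0.5 + 0.1 * step, bce=loss, global_step=step)
logger.log_valid(acc=0.7, bce=0.4, global_step=3)
# The scalars can then be inspected with: tensorboard --logdir logs
```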
#### File: Comment2Code/Code/util.py
```python
import re
import string
import numpy as np
import tensorflow as tf
# Based on https://github.com/DongjunLee/transformer-tensorflow/blob/master/transformer/attention.py
pos_cache = None
def positional_encoding(dim, sentence_length, dtype=tf.float32):
global pos_cache
    if pos_cache is not None and pos_cache[1] == dim:
if pos_cache[0] == sentence_length:
return pos_cache[2]
elif pos_cache[0] > sentence_length:
return pos_cache[2][:sentence_length]
encoded_vec = np.array([pos / np.power(10000, 2 * i / dim)
for pos in range(sentence_length) for i in range(dim)])
encoded_vec[::2] = np.sin(encoded_vec[::2])
encoded_vec[1::2] = np.cos(encoded_vec[1::2])
pos_enc = tf.constant(encoded_vec.reshape(
[sentence_length, dim]), dtype=dtype)
pos_cache = (sentence_length, dim, pos_enc)
return pos_enc
def compute_transformer_learning_rate(base_lr, hidden_dim, warmup_steps, curr_step):
learning_rate = base_lr * (hidden_dim ** -0.5)
learning_rate *= tf.minimum(1.0, curr_step / warmup_steps)
learning_rate *= tf.math.rsqrt(tf.cast(tf.maximum(curr_step,
warmup_steps), "float32"))
return learning_rate
def tensor_matrix_mul(t, m):
return tf.reshape(tf.reshape(t, [-1, t.shape[-1]]) @ m, [-1, t.shape[1], m.shape[-1]])
def merge(map, key, value, wrap_fn=None):
if key in map:
if isinstance(map[key], int) or isinstance(map[key], float):
map[key] += value
elif isinstance(map[key], list):
map[key].append(value)
elif isinstance(map[key], set):
map[key].add(value)
else:
print("Unsure how to add", value, "to", map[key])
else:
map[key] = value if wrap_fn is None else wrap_fn(value)
def get_data(data):
for line in data:
yield line['before_comment']
yield line['after_comment']
yield line['before_code']
yield line['after_code']
```
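A quick, hedged check of the two helpers above. The import path `util` and all numeric values are assumptions for illustration; the snippet prints the shape of the positional-encoding table and a few points of the warmup-then-decay learning-rate schedule.

```python
# Illustrative check of positional_encoding and the transformer LR schedule.
# The module is assumed to be importable as `util`; all numbers are examples.
from util import positional_encoding, compute_transformer_learning_rate

pe = positional_encoding(dim=16, sentence_length=10)
print(pe.shape)  # (10, 16): one 16-dimensional encoding per position

# Linear warmup for `warmup_steps`, then decay proportional to 1/sqrt(step).
for step in (1.0, 100.0, 4000.0, 16000.0):
    lr = compute_transformer_learning_rate(
        base_lr=2.0, hidden_dim=512, warmup_steps=4000.0, curr_step=step)
    print(step, float(lr))
```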
#### File: Comment2Code/src/code_crawler.py
```python
import argparse
import csv
import linecache
import os
import pickle
import traceback
import sys
import subprocess
import ray
import itertools
import pygments
import json
from collections import namedtuple
from pathlib import Path
from pdb import set_trace
from git_crawler import get_postcommit_file, get_precommit_file, ADDED_MODE, REMOVED_MODE, GitChange
from lexer import strip_special_chars, build_lexer, lex_content
from pygments.token import Comment, Text, Keyword, Operator
from clean import *
from utils import *
CommentCodeRow = namedtuple(
'CommentCodeRow',
['before_comment', 'before_code', 'before_heuristic', 'after_comment', 'after_code', 'after_heuristic',
'before_path', 'after_path', 'before_line', 'after_line', 'type', 'commit'])
DATA_DIR = "../data"
class Heuristic(object):
"""
- Get code up to the next blank line (storing extra 10 lines ahead)
- Get code till the end of the function (return statement)
- Get code if code and comment are on the same line
- Get code up to the next comment
"""
BLANK_LINE = "BLANK_LINE"
END_OF_FUNCTION = "END_OF_FUNCTION"
SAME_LINE = "SAME_LINE"
NEXT_COMMENT = "NEXT_COMMENT"
CLOSE_PAREN = "CLOSE_PAREN"
LOOK_AHEAD_LINES = 11 # look ahead 10 lines
@classmethod
def should_stop_java(cls, tokens):
for ttype, token in tokens:
# end of function heuristic
if ttype == Keyword and token == 'return':
return True, Heuristic.END_OF_FUNCTION
# end of function heuristic
if token == '}':
return True, Heuristic.CLOSE_PAREN
# next comment heuristic
if ttype in Comment:
return True, Heuristic.NEXT_COMMENT
return False, None
@classmethod
def should_stop(cls, tokens):
# new line heuristic
if len(tokens) == 1 and tokens[0][0] == Text and tokens[0][1] == "\n":
return True, Heuristic.BLANK_LINE
for ttype, token in tokens:
# end of function heuristic
if ttype == Keyword and token == 'return':
return True, Heuristic.END_OF_FUNCTION
# next comment heuristic
if ttype in Comment:
return True, Heuristic.NEXT_COMMENT
return False, None
def path_to_file(repo_dir, org, project):
return os.path.join(repo_dir, org, project)
def create_file_name(org, project, file, commit, is_added=False):
file_name = "{}#{}#{}#{}".format(
org, project, commit, file.replace("/", "__"))
if is_added:
return Path(os.path.join(DATA_DIR, "files-post", file_name)), file_name
else:
return Path(os.path.join(DATA_DIR, "files-pre", file_name)), file_name
def extract_code(start_lineno, file_name):
with open(file_name.as_posix(), mode='r', encoding='iso-8859-1') as f:
content = f.readlines()
lexer = build_lexer('java')
# content array is 0 index so need to shift down by 1
start_lineno = max(0, start_lineno - 1)
comment, comment_end = capture_comment(content, lexer, start_lineno)
to_extract_content = content[comment_end:]
code_end = 1
heuristic = None
block_count = 0
for i, line in enumerate(to_extract_content):
tokens = list(pygments.lex(line, lexer))
should_stop, reason = Heuristic.should_stop_java(tokens)
if should_stop and block_count == 0:
heuristic = reason
code_end = i
break
if heuristic == Heuristic.CLOSE_PAREN:
code_end = min(code_end, len(to_extract_content))
code = capture_code(code_end, lexer, to_extract_content)
comment = strip_special_chars(comment)
elif heuristic == Heuristic.NEXT_COMMENT:
code_end = min(code_end, len(to_extract_content))
code = capture_code(code_end, lexer, to_extract_content)
comment = strip_special_chars(comment)
else:
code_end = min(code_end + 5, len(to_extract_content))
code = capture_code(code_end + 1, lexer, to_extract_content)
comment = strip_special_chars(comment)
# if "Close the server and confirm it saw what we expected." in comment:
# set_trace()
# skipping comment and code are on the same line case
# if not comment:
# if len(content) - 1 < start_lineno:
# print("Length of content is less than start_line {}".format(
# file_name.as_posix()))
# return None, None, None
# ttypes = [t for t, _ in pygments.lex(content[start_lineno], lexer)]
# if is_a_code_line(ttypes) and contains_a_comment(ttypes):
# line = content[start_lineno].split("//")
# if len(line) != 2:
# return None, None, None
# code, comment = line[:-1], line[-1]
# code = [w.strip() for w in code]
# comment = comment.strip().replace("\n", "\\n")
return clean_comment(comment), clean_code(code), heuristic
def capture_code(code_end, lexer, to_extract_content):
code = []
for line in to_extract_content[:code_end]:
ttypes = [t for t, _ in pygments.lex(line, lexer)]
line = line.strip()
if is_a_code_line(ttypes) and '/*' not in line and not line.startswith('*') and "//" not in line:
code.append(line)
return code
def capture_comment(content, lexer, start):
    # look backward to capture the entire comment in case we are in the middle of a multiline comment
comment_start = comment_end = start
for line in reversed(content[:start]):
ttypes = [t for t, _ in pygments.lex(line, lexer)]
# if a line has no keyword, name or operator
# and has a comment token we assume it is a part of the initial comment
if is_a_comment_line_java(ttypes):
comment_start -= 1
else:
break
    # look forward to capture the entire comment in case we are in the middle of a multiline comment
for line in content[start:]:
ttypes = [t for t, _ in pygments.lex(line, lexer)]
if is_a_comment_line_java(ttypes):
comment_end += 1
else:
break
comment = content[comment_start:comment_end]
return comment, comment_end
def get_commit_file(path, commit_id, out_file):
return subprocess.run(['git', 'show', commit_id], cwd=path, stdout=out_file)
@ray.remote
def get_commit_files_and_extract(csv_file_name, repo_dir):
comment_code_rows = []
try:
print("Extracting code for {}".format(csv_file_name))
with open(csv_file_name, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
# using csv header as namedtuple fields
DataRow = namedtuple('DataRow', next(reader))
for row in map(DataRow._make, reader):
full_bf_name, bf_name = create_file_name(
row.org, row.project, row.before_path, row.before_commit, is_added=False)
full_af_name, af_name = create_file_name(
row.org, row.project, row.after_path, row.after_commit, is_added=True)
get_commit_file(
path=path_to_file(repo_dir, row.org, row.project),
commit_id=row.before_commit,
out_file=full_af_name.open('w'),
),
get_commit_file(
path=path_to_file(repo_dir, row.org, row.project),
commit_id=row.after_commit,
out_file=full_bf_name.open('w')
)
bf_comment, bf_code, bf_heuristic = extract_code(
int(row.added_line), full_af_name)
af_comment, af_code, af_heuristic = extract_code(
int(row.rm_line), full_bf_name)
if (
(af_comment == bf_comment and bf_code == af_code)
or
(not af_comment or not bf_comment or not bf_code or not af_code)
):
continue
if af_comment == bf_comment and bf_code != af_code:
change_type = GitChange.CODE
elif af_comment != bf_comment and bf_code != af_code:
change_type = GitChange.BOTH
else:
change_type = GitChange.COMMENT
comment_code_rows.append(CommentCodeRow(bf_comment, bf_code, bf_heuristic, af_comment, af_code,
af_heuristic, bf_name, af_name, row.added_line, row.rm_line,
change_type, row.commit)._asdict())
except Exception as e:
print("Exception processing", csv_file_name,
"--", traceback.print_exc(file=sys.stdout))
print("Finished extracting code for {}".format(csv_file_name))
return comment_code_rows
def write_data(rows, file_name):
if not rows:
return 0
with open(DATA_DIR + '/Pairs/{}.json'.format(file_name), 'w+') as f:
json.dump(rows, f)
print("Comment-Code Pairs written {} for {}".format(len(rows), file_name))
return len(rows)
def parse_args():
parser = argparse.ArgumentParser(
description="Extract code from commit files")
parser.add_argument("-d", "--dir", required=False, default="../data/Diffs",
help="Directory to extract code from")
parser.add_argument("-r", "--repos", required=False,
default="../data/Repos")
return parser.parse_args()
def main(dir_path, repo_dir):
diff_list = os.listdir(dir_path)
total = 0
result_ids = []
for idx, csv_diff_file in enumerate(diff_list):
path = os.path.join(dir_path, csv_diff_file)
result_ids.append(get_commit_files_and_extract.remote(path, repo_dir))
results = list(itertools.chain.from_iterable(ray.get(result_ids)))
write_data(results, 'code_comment_{}'.format(len(results)))
total += len(results)
if __name__ == '__main__':
args = parse_args()
diffs_dir = args.dir
repos_dir = args.repos
ray.init(num_cpus=os.cpu_count() // 2)
import time
s = time.perf_counter()
main(diffs_dir, repos_dir)
elapsed = time.perf_counter() - s
print("{} executed in {:0.2f} seconds.".format(__file__, elapsed))
```
#### File: src/tests/test_code_crawler.py
```python
from code_crawler import extract_code, capture_comment
from pathlib import Path
from lexer import build_lexer
import linecache
def tests():
path = Path('./test_file.py')
lexer = build_lexer()
comment, code, h = extract_code(1, path)
print("-" * 10)
print(comment)
print(code)
assert comment == "# test case 1, starting at line 2\\n"
assert code == [['def', 'hello', '(', ')', ':'], []]
comment, code, h = extract_code(19, path)
print("-" * 10)
assert comment == "# ImportError: the machinery told us it does not exist\\n# ValueError:\\n# - the module name was invalid\\n# - the module name is __main__\\n# - *we* raised `ValueError` due to `spec` being `None`\\n"
assert code == [['except', '(', 'ImportError', ',', 'ValueError', ')', ':'], []]
print(comment)
print(code)
comment, code, h = extract_code(40, path)
print("-" * 10)
assert comment == "# we were unable to find the `package_path` using PEP 451 loaders\\n"
assert code == [['loader', '=', 'pkgutil', '.', 'get_loader', '(', 'root_mod_name', ')'],
['if', 'loader', 'is', 'None', 'or', 'import_name', '==', '"str"', ':'],
['if', 'loader', 'is', 'None', 'or', 'root_mod_name', '==', '"str"', ':'], []]
print(comment)
print(code)
comment, code, h = extract_code(52, path)
print("-" * 10)
print(comment)
print(code)
assert comment == "# Google App Engine's HardenedModulesHook\\n#\\n# Fall back to imports.\\n"
assert code == [['if', 'x', '==', 'True', ':'], ['x', '=', '1'], []]
comment, code, h = extract_code(61, path)
print("-" * 10)
print(comment)
print(code)
comment, code, h = extract_code(64, path)
print("-" * 10)
print(comment)
print(code)
print("-" * 10)
content = linecache.getlines(path.as_posix())
comment, comment_end = capture_comment(content, lexer, 0)
assert comment == ['# test case 1, starting at line 2\n']
if __name__ == '__main__':
tests()
``` |
{
"source": "JohnlNguyen/FLSim",
"score": 3
} |
#### File: flsim/channels/communication_stats.py
```python
from enum import Enum, auto
from flsim.utils.fl.stats import RandomVariableStatsTracker
class ChannelDirection(Enum):
CLIENT_TO_SERVER = auto()
SERVER_TO_CLIENT = auto()
class ChannelStatsCollector:
def __init__(self):
self.reset_channel_stats()
def reset_channel_stats(self):
self.communication_stats = {
ChannelDirection.CLIENT_TO_SERVER: RandomVariableStatsTracker(),
ChannelDirection.SERVER_TO_CLIENT: RandomVariableStatsTracker(),
}
def get_channel_stats(self):
return self.communication_stats
def collect_channel_stats(
self, message_size_bytes: float, client_to_server: bool = True
):
"""
Collect statistics about the updates/model transmitted both
for client to server and server to client directions.
"""
direction = (
ChannelDirection.CLIENT_TO_SERVER
if client_to_server
else ChannelDirection.SERVER_TO_CLIENT
)
self.communication_stats[direction].update(message_size_bytes)
```
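A hypothetical usage sketch of `ChannelStatsCollector`. The message sizes are made-up numbers, and it is assumed that `RandomVariableStatsTracker` exposes a `mean()` accessor over the recorded values, as its moving-average counterpart does elsewhere in this repository.

```python
# Hypothetical usage; message sizes are invented for illustration.
from flsim.channels.communication_stats import (
    ChannelDirection,
    ChannelStatsCollector,
)

stats = ChannelStatsCollector()
stats.collect_channel_stats(message_size_bytes=1_000_000, client_to_server=True)
stats.collect_channel_stats(message_size_bytes=1_200_000, client_to_server=True)
stats.collect_channel_stats(message_size_bytes=4_000_000, client_to_server=False)

per_direction = stats.get_channel_stats()
# Assumes RandomVariableStatsTracker exposes mean() over the recorded sizes.
print(per_direction[ChannelDirection.CLIENT_TO_SERVER].mean())
```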
#### File: flsim/channels/scalar_quantization_channel.py
```python
from __future__ import annotations
from collections import OrderedDict
from dataclasses import dataclass
import torch
from flsim.channels.base_channel import (
IdentityChannel,
FLChannelConfig,
Message,
)
from flsim.utils.config_utils import fullclassname
from flsim.utils.config_utils import init_self_cfg
from torch.quantization.observer import (
MinMaxObserver,
PerChannelMinMaxObserver,
)
class ScalarQuantizationChannel(IdentityChannel):
"""
Implements a channel that emulates scalar quantization from 1 to 8
bits per weight (8 bits per weight corresponds to int8 quantization).
We emulate this by successively quantizing and dequantizing. This way,
    the rest of the training is transparent to aggregators, reducers,
trainers and so on.
Notes:
- We can perform either per_tensor quantization (same scale and
zero_point for every parameters in a weight matrix) or per_channel
quantization (each channel has its own scale and zero_point). Set
quantize_per_tensor = False to perform per_channel quantization.
- We rely on the very simple MinMax observers for both per_tensor
and per_channel quantization. This can be refined by leveraging the
HistogramObserver for instance.
- We do not quantize the biases for the moment since their compression
overhead is very small.
- We arbitrarily choose to set the int_repr() of a quantized tensor
to [0, 2 ** n_bits - 1]. This is called unsigned quantization.
- All the quantized tensors share the same type, ie `torch.quint8`.
However, when quantizing to less than 8 bits, this is not memory
efficient since each element is stored over 1 byte anyway. Since
we are interested only in emulation for the moment, that's good.
We could also have relied on the fake_quantize primitives but we
prefer to rely on the true quantization operators.
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=ScalarQuantizationChannelConfig,
**kwargs,
)
super().__init__(**kwargs)
self.n_bits = self.cfg.n_bits
self.quantize_per_tensor = self.cfg.quantize_per_tensor
assert (
0 < self.n_bits <= 8
), "ScalarQuantizationChannel expects n_bits between 1 and 8 (included)."
self.quant_min = 0
self.quant_max = 2 ** self.n_bits - 1
self.observer, self.quantizer = self._get_observers_and_quantizers()
def _calc_message_size_client_to_server(self, message: Message):
"""
We compute the size of the compressed message as follows:
- for the weights (compressed): n_bits / 8 bytes per element
- for the biases (not compressed): 4 bytes per element
- for the scales (one for each layer or one for each layer channel
depending on quantize_per_tensor): 8 bytes / element (fp64)
        - for the zero_points (one for each layer or one for each layer channel
depending on quantize_per_tensor): 4 bytes / element (int32)
"""
message_size_bytes = 0
for param in message.model_state_dict.values():
if param.ndim > 1:
# integer representation
message_size_bytes += param.numel() * self.n_bits / 8
# size of scale(s) (fp64) and zero_point(s) (int32)
if self.quantize_per_tensor:
message_size_bytes += ScalarQuantizationChannel.BYTES_PER_FP64
message_size_bytes += ScalarQuantizationChannel.BYTES_PER_FP32
else:
# pyre-ignore[16]: `torch.Tensor` has no attribute `q_per_channel_scales`
n_scales = param.q_per_channel_scales().numel()
# pyre-ignore[16]: `torch.Tensor` has no attribute `q_per_channel_zero_points`
n_zero_points = param.q_per_channel_zero_points().numel()
message_size_bytes += (
ScalarQuantizationChannel.BYTES_PER_FP64 * n_scales
)
message_size_bytes += (
ScalarQuantizationChannel.BYTES_PER_FP32 * n_zero_points
)
else:
message_size_bytes += 4 * param.numel()
return message_size_bytes
def _get_observers_and_quantizers(self):
if self.quantize_per_tensor:
observer = MinMaxObserver(
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
quant_min=self.quant_min,
quant_max=self.quant_max,
reduce_range=False,
)
quantizer = torch.quantize_per_tensor
else:
observer = PerChannelMinMaxObserver(
dtype=torch.quint8,
qscheme=torch.per_channel_affine,
quant_min=self.quant_min,
quant_max=self.quant_max,
reduce_range=False,
ch_axis=0,
)
quantizer = torch.quantize_per_channel
return observer, quantizer
def _quantize(self, x: torch.Tensor) -> torch.Tensor:
# important to reset values, otherwise takes running min and max
self.observer.reset_min_max_vals()
# forward through the observer to get scale(s) and zero_point(s)
_ = self.observer(x)
scale, zero_point = self.observer.calculate_qparams()
# Emulate quantization. Not a no-op since we lose precision when quantizing.
if self.quantize_per_tensor:
xq = self.quantizer(x, float(scale), int(zero_point), dtype=torch.quint8)
else:
scale = scale.to(x.device)
zero_point = zero_point.to(x.device)
xq = self.quantizer(x, scale, zero_point, axis=0, dtype=torch.quint8)
return xq
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _on_client_before_transmission(self, message: Message) -> Message:
"""
We quantize the weights but do not quantize the biases since
the overhead is very small. We copy the state dict since the
tensor format changes.
"""
message.populate_state_dict()
new_state_dict = OrderedDict()
for name, param in message.model_state_dict.items():
if param.ndim > 1:
new_state_dict[name] = self._quantize(param.data)
else:
new_state_dict[name] = param.data
message.model_state_dict = new_state_dict
return message
def _on_server_before_transmission(self, message: Message) -> Message:
message.populate_state_dict()
return message
def _on_server_after_reception(self, message: Message) -> Message:
"""
We dequantize the weights and do not dequantize the biases
        since they have not been quantized in the first place. We
copy the state dict since the tensor format changes.
"""
new_state_dict = OrderedDict()
for name, param in message.model_state_dict.items():
if param.ndim > 1:
# pyre-ignore[16]: `torch.Tensor` has no attribute `dequantize`
new_state_dict[name] = param.data.dequantize()
else:
new_state_dict[name] = param.data
message.model_state_dict = new_state_dict
message.update_model_()
return message
@dataclass
class ScalarQuantizationChannelConfig(FLChannelConfig):
_target_: str = fullclassname(ScalarQuantizationChannel)
n_bits: int = 8
quantize_per_tensor: bool = True
```
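The `_quantize` method above boils down to the standard observe, compute-qparams, quantize, dequantize round trip. Below is a minimal standalone sketch of that round trip for the per-tensor, 8-bit case, using the same torch primitives the channel relies on; the weight tensor and its shape are illustrative.

```python
# Standalone sketch of the emulated per-tensor, 8-bit quantization round trip.
import torch
from torch.quantization.observer import MinMaxObserver

w = torch.randn(4, 3)  # stand-in for one weight matrix
observer = MinMaxObserver(
    dtype=torch.quint8,
    qscheme=torch.per_tensor_affine,
    quant_min=0,
    quant_max=255,  # 2 ** 8 - 1, i.e. unsigned 8-bit quantization
    reduce_range=False,
)
observer(w)  # record the min/max of the tensor
scale, zero_point = observer.calculate_qparams()
wq = torch.quantize_per_tensor(w, float(scale), int(zero_point), dtype=torch.quint8)
w_hat = wq.dequantize()
# Not a no-op: a small quantization error remains after the round trip.
print((w - w_hat).abs().max())
```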
#### File: flsim/common/fine_tuner.py
```python
from typing import Any, Iterable, Tuple
from flsim.clients.base_client import ClientConfig
from flsim.common.timeline import Timeline
from flsim.data.data_provider import IFLUserData
from flsim.interfaces.metrics_reporter import IFLMetricsReporter, TrainingStage
from flsim.interfaces.model import IFLModel
from flsim.utils.cuda import CudaTransferMinimizer
from hydra.utils import instantiate
from tqdm import tqdm
class FineTuner:
@classmethod
def fine_tune_and_evaluate(
cls,
data: Iterable[IFLUserData],
global_model: IFLModel,
client_config: ClientConfig,
metric_reporter: IFLMetricsReporter,
cuda_state_manager: CudaTransferMinimizer,
training_stage: TrainingStage,
timeline: Timeline,
epochs: int,
) -> Tuple[Any, bool]:
for user_data in tqdm(
data,
desc="Fine-tune clients",
unit="client",
):
FineTuner.fine_tune_model(
global_model=global_model,
data=user_data,
client_config=client_config,
metric_reporter=metric_reporter,
cuda_state_manager=cuda_state_manager,
epochs=epochs,
)
return metric_reporter.report_metrics(
model=global_model,
reset=True,
stage=training_stage,
timeline=timeline,
epoch=timeline.global_round_num(), # for legacy
print_to_channels=True,
)
@classmethod
def fine_tune_model(
cls,
global_model: IFLModel,
data: IFLUserData,
client_config: ClientConfig,
metric_reporter: IFLMetricsReporter,
cuda_state_manager: CudaTransferMinimizer,
epochs: int,
) -> IFLModel:
eval_client = instantiate(
client_config,
_partial_=False,
dataset=data,
cuda_manager=cuda_state_manager,
)
fine_tuned_model, _, _ = eval_client.copy_and_train_model(
model=global_model, epochs=epochs
)
eval_client.eval(model=fine_tuned_model, metric_reporter=metric_reporter)
return fine_tuned_model
```
#### File: flsim/common/logger.py
```python
import logging
import sys
LOGGING_LEVEL: int = logging.WARNING
# If this flag is true logging will be printed to both stdout and stderr
PRINT_TO_STDOUT = False
class Logger:
parent_name = "FLSimLogger"
_instance = None
logging_level = LOGGING_LEVEL
print_to_stdout = PRINT_TO_STDOUT
parent_logger = logging.getLogger(parent_name)
parent_logger.setLevel(logging_level)
children_loggers = []
@classmethod
def get_logger(cls, name: str) -> logging.Logger:
"""Returns a Logger which is a child of the PARENT_LOGGER. Logger hierarchy is
through the name, with `.` used to denote hierarchy levels.
"""
logger = logging.getLogger(cls.parent_name + "." + name)
if cls.print_to_stdout:
handler = logging.StreamHandler(sys.stdout)
cls.parent_logger.addHandler(handler)
cls.children_loggers.append(logger)
return logger
@classmethod
def set_logging_level(cls, level: int) -> None:
cls.parent_logger.setLevel(level)
for child_logger in cls.children_loggers:
child_logger.setLevel(0)
```
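A hypothetical usage sketch of the `Logger` helper; the child logger name and the messages are made up.

```python
# Hypothetical usage of the Logger helper above.
import logging
from flsim.common.logger import Logger

Logger.print_to_stdout = True            # attach a stdout handler when the child is created
log = Logger.get_logger("my_component")  # becomes "FLSimLogger.my_component"
Logger.set_logging_level(logging.INFO)   # adjust verbosity for the whole FLSimLogger tree
log.info("this propagates up to the FLSimLogger handler")
```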
#### File: flsim/common/timeline.py
```python
from typing import NamedTuple
EPS = 1e-10
class Timeline(NamedTuple):
"""
A point in time of the simulation.
This contains epoch, round, and rounds_per_epoch or
global_round and rounds_per_epoch. All of these values
start from 1 by flsim convention. If any of these
need not be set the default value of 0 is assinged, which
helps us handle collision cases / initialization errors.
Timeline usually refers to how many epochs/rounds have *finished*
"""
epoch: int = 0
round: int = 0
global_round: int = 0
rounds_per_epoch: int = 1
def global_round_num(self) -> int:
r"""
Returns the global round number starting from 1.
For example, if round is 2 and epoch is 10 and num_rounds_per_epoch is 10
        it will return 92 ((10 - 1) * 10 + 2).
"""
assert (self.epoch and self.round) or self.global_round
return (
self.global_round or (self.epoch - 1) * self.rounds_per_epoch + self.round
)
def as_float(self, offset: int = 0) -> float:
r"""
prints the time-line as a floating number.
E.g. if round is 2 and epoch is 10 and num_rounds_per_epoch is 10
        it will return 9.2. By default it uses the round value of the object
but can also show a value for a run that is offset away from the
current round.
"""
return (self.global_round_num() + offset) / self.rounds_per_epoch
def tick(self, tick_interval: float) -> bool:
r"""
        Returns true once every `tick_interval` epochs.
        E.g: if tick_interval is 0.1, it returns true 10 times every epoch
This function is useful for deciding when to report train/eval results
"""
return (self.as_float() + EPS) // tick_interval > (
self.as_float(-1) + EPS
) // tick_interval
def __str__(self):
assert (self.epoch > 0 and self.round > 0) or self.global_round > 0
e = self.epoch or ((self.global_round - 1) // self.rounds_per_epoch + 1)
r = self.round or (((self.global_round - 1) % self.rounds_per_epoch) + 1)
gr = self.global_round or ((e - 1) * self.rounds_per_epoch + r)
return f"(epoch = {e}, round = {r}, global round = {gr})"
```
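A short worked example of the `Timeline` accounting described in the docstrings above; all numbers are illustrative.

```python
# Worked example of the Timeline conventions (all values start from 1).
from flsim.common.timeline import Timeline

tl = Timeline(epoch=10, round=2, rounds_per_epoch=10)
print(tl.global_round_num())  # 92: (10 - 1) * 10 + 2
print(tl.as_float())          # 9.2
print(str(tl))                # (epoch = 10, round = 2, global round = 92)

# tick(1.0) fires once per epoch, i.e. only on the round that completes an epoch.
print(tl.tick(1.0))                                                  # False
print(Timeline(epoch=10, round=10, rounds_per_epoch=10).tick(1.0))   # True
```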
#### File: flsim/data/data_provider.py
```python
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Iterable, Iterator, List, Optional
from flsim.interfaces.model import IFLModel
class IFLUserData(ABC):
"""
Wraps data for a single user
IFLUserData is responsible for
1. Keeping track of the number of examples for a particular user
2. Keeping track of the number of batches for a particular user
3. Providing an iterator over all the user batches
"""
def num_total_examples(self) -> int:
"""
Returns the number of examples
"""
return self.num_train_examples() + self.num_eval_examples()
def num_total_batches(self) -> int:
"""
Returns the number of batches
"""
return self.num_train_batches() + self.num_eval_batches()
@abstractmethod
def num_train_examples(self) -> int:
"""
Returns the number of train examples
"""
@abstractmethod
def num_eval_examples(self) -> int:
"""
Returns the number of eval examples
"""
@abstractmethod
def num_train_batches(self) -> int:
"""
Returns the number of training batches
"""
@abstractmethod
def num_eval_batches(self) -> int:
"""
Returns the number of eval batches
"""
@abstractmethod
def train_data(self) -> Iterator[Any]:
"""
Returns the training batches
"""
@abstractmethod
def eval_data(self) -> Iterator[Any]:
"""
Returns the eval batches
"""
class IFLDataProvider(ABC):
"""
Provides data to the trainer
    IFLDataProvider is responsible for
1. Enforcing a uniform interface that trainer expects
2. Transforming data into what IFLModel.fl_forward() is going to consume
3. Keeping track of the sharded client data
"""
@abstractmethod
def train_user_ids(self) -> List[int]:
"""
Returns a list of user ids in the data set
"""
@abstractmethod
def num_train_users(self) -> int:
"""
Returns the number of users in train set
"""
@abstractmethod
def get_train_user(self, user_index: int) -> IFLUserData:
"""
Returns train user from user_index
"""
@abstractmethod
def train_users(self) -> Iterable[IFLUserData]:
"""
Returns training users iterable
"""
@abstractmethod
def eval_users(self) -> Iterable[IFLUserData]:
"""
Returns evaluation users iterable
"""
@abstractmethod
def test_users(self) -> Iterable[IFLUserData]:
"""
Returns test users iterable
"""
class FLUserDataFromList(IFLUserData):
"""
Util class to create an IFLUserData from a list of user batches
"""
def __init__(
self, data: Iterable, model: IFLModel, eval_batches: Optional[Iterable] = None
):
self.data = data
self._num_examples: int = 0
self._num_batches: int = 0
self.model = model
self.training_batches = []
self.eval_batches = eval_batches if eval_batches is not None else []
for batch in self.data:
training_batch = self.model.fl_create_training_batch(batch=batch)
self.training_batches.append(training_batch)
self._num_examples += model.get_num_examples(training_batch)
self._num_batches += 1
def train_data(self):
for batch in self.training_batches:
yield batch
def eval_data(self):
for batch in self.eval_batches:
yield self.model.fl_create_training_batch(batch=batch)
def num_batches(self):
return self._num_batches
def num_train_examples(self):
return self._num_examples
def num_eval_batches(self):
return 0
def num_train_batches(self):
return self._num_batches
def num_eval_examples(self):
return 0
class FLDataProviderFromList(IFLDataProvider):
r"""
Util class to help ease the transition to IFLDataProvider
=======
Args:
train_user_list: (Iterable[Iterable[Any]]): train data
eval_user_list: (Iterable[Iterable[Any]]): eval data
test_user_list (Iterable[Iterable[Any]]): test data
model: (IFLModel): the IFLModel to create training batch for
"""
def __init__(
self,
train_user_list: Iterable[Iterable[Any]],
eval_user_list: Iterable[Iterable[Any]],
test_user_list: Iterable[Iterable[Any]],
model: IFLModel,
):
self.train_user_list = train_user_list
self.eval_user_list = eval_user_list
self.test_user_list = test_user_list
self.model = model
self._train_users = {
user_id: FLUserDataFromList(
data=user_data, eval_batches=user_data, model=model
)
for user_id, user_data in enumerate(train_user_list)
}
self._eval_users = {
user_id: FLUserDataFromList(data=[], eval_batches=user_data, model=model)
for user_id, user_data in enumerate(eval_user_list)
}
self._test_users = {
user_id: FLUserDataFromList(data=[], eval_batches=user_data, model=model)
for user_id, user_data in enumerate(test_user_list)
}
def train_user_ids(self):
return list(self._train_users.keys())
def num_train_users(self):
return len(self.train_user_ids())
def get_train_user(self, user_index: int):
if user_index in self._train_users:
return self._train_users[user_index]
else:
raise IndexError(f"Index {user_index} not in {self.train_user_ids()}")
def train_users(self):
return list(self._train_users.values())
def eval_users(self):
return list(self._eval_users.values())
def test_users(self):
return list(self._test_users.values())
```
#### File: utils/data/data_utils.py
```python
import hashlib
import time
from collections import defaultdict
from itertools import zip_longest
from typing import Any, Dict, Generator, Iterable, List, Optional
import torch
def batchify(
iterable: Iterable[Any], batch_size: int, drop_last: Optional[bool] = False
) -> Generator:
"""
Groups list into batches
Example:
>>> batchify([1, 2, 3, 4, 5], 2)
>>> [[1, 2], [3, 4], [5]]
"""
iterators = [iter(iterable)] * batch_size
for batch in zip_longest(*iterators, fillvalue=None):
batch = [ex for ex in batch if ex is not None]
if drop_last and len(batch) != batch_size:
break
yield batch
def merge_dicts(batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
"""
Merge a list of dictionaries into one dictionary
Example:
>>> merge_dicts([{"a": torch.Tensor([1])}, {"a": torch.Tensor([2])}])
>>> {"a": torch.Tensor([1.0, 2.0])},
"""
res = defaultdict(list)
for ex in batch:
for key, value in ex.items():
res[key].append(value)
return {k: torch.cat(v) for k, v in res.items()}
def stable_hash(base: int = 100000) -> int:
md5 = hashlib.md5(str(time.time()).encode("utf-8"))
return int.from_bytes(md5.digest(), byteorder="little") % base
```
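A quick demonstration of chaining the two helpers above to turn per-example dicts into batched tensors; the data is made up.

```python
# Quick demonstration of batchify + merge_dicts on made-up per-example dicts.
import torch
from flsim.utils.data.data_utils import batchify, merge_dicts

rows = [{"x": torch.tensor([float(i)])} for i in range(5)]
for batch in batchify(rows, batch_size=2):
    merged = merge_dicts(batch)
    print(merged["x"])  # tensor([0., 1.]), then tensor([2., 3.]), then tensor([4.])
```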
#### File: utils/fl/target_metric.py
```python
from enum import Enum
from flsim.utils.fl.stats import RandomVariableStatsTrackerMA, AverageType
class TargetMetricDirection(Enum):
MIN = "min"
MAX = "max"
class TargetMetricTracker:
"""
Tracks the sliding window of eval metric throughout the course of training
and reports the round if eval metric target is reached
"""
def __init__(
self,
target_value: float,
window_size: int,
average_type: AverageType,
direction: TargetMetricDirection,
):
self._stats = RandomVariableStatsTrackerMA(
window_size=window_size, mode=average_type
)
self.target_value = target_value
self.window_size = window_size
self.direction = direction
def update_and_check_target(
self,
current_eval_metric: float,
) -> bool:
"""
Updates the stats tracker with latest eval metric
Return value:
True if target metric is reached.
"""
self._stats.update(current_eval_metric)
if self._stats.num_samples < self.window_size:
return False
return (
self._stats.mean() > self.target_value
if self.direction == TargetMetricDirection.MAX
else self._stats.mean() < self.target_value
)
@property
def mean(self):
return self._stats.mean()
```
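A hypothetical usage sketch of `TargetMetricTracker`: it declares a target accuracy of 0.75 reached once the 3-round windowed mean exceeds it. The accuracy series is invented, and `AverageType.SMA` is assumed to be one of the average types provided by `flsim.utils.fl.stats`.

```python
# Hypothetical usage; the accuracy series and the target value are invented.
from flsim.utils.fl.stats import AverageType
from flsim.utils.fl.target_metric import TargetMetricTracker, TargetMetricDirection

tracker = TargetMetricTracker(
    target_value=0.75,
    window_size=3,
    average_type=AverageType.SMA,  # assumed member; simple moving average
    direction=TargetMetricDirection.MAX,
)
for round_num, acc in enumerate([0.70, 0.74, 0.78, 0.80], start=1):
    if tracker.update_and_check_target(acc):
        print(f"target reached at round {round_num}")  # once the windowed mean > 0.75
        break
```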
#### File: utils/tests/test_async_weights.py
```python
import numpy as np
import pytest
from flsim.common.pytest_helper import assertEqual
from flsim.utils.async_trainer.async_example_weights import (
ExampleWeight,
AsyncExampleWeightConfig,
)
from flsim.utils.async_trainer.async_staleness_weights import (
AsyncStalenessWeightConfig,
StalenessWeight,
ConstantStalenessWeightConfig,
ThresholdStalenessWeightConfig,
PolynomialStalenessWeightConfig,
)
from flsim.utils.async_trainer.async_weights import AsyncWeightConfig
from flsim.utils.tests.helpers.async_weights_test_utils import (
AsyncExampleWeightsTestUtils,
AsyncStalenessWeightsTestUtils,
)
from hydra.utils import instantiate
class TestAsyncExampleWeights:
# two parametrize together produce a cartesian product
@pytest.mark.parametrize(
"example_weight_config, example_weight_class",
AsyncExampleWeightsTestUtils.EXAMPLE_WEIGHT_TEST_CONFIGS,
)
@pytest.mark.parametrize(
"staleness_weight_config, staleness_weight_class",
AsyncStalenessWeightsTestUtils.STALENESS_WEIGHT_TEST_CONFIGS,
)
def test_string_conversion(
self,
example_weight_config: AsyncExampleWeightConfig,
example_weight_class: ExampleWeight,
staleness_weight_config: AsyncStalenessWeightConfig,
staleness_weight_class: StalenessWeight,
) -> None:
"""Check that strings are correctly converted to AsyncWeight"""
obj = instantiate(
AsyncWeightConfig(
staleness_weight=staleness_weight_config,
example_weight=example_weight_config,
)
)
assertEqual(obj.example_weight.__class__, example_weight_class)
assertEqual(obj.staleness_weight.__class__, staleness_weight_class)
@pytest.mark.parametrize(
"example_weight_config, example_weight_class",
AsyncExampleWeightsTestUtils.EXAMPLE_WEIGHT_TEST_CONFIGS,
)
def test_weight_compute(
self,
example_weight_config: AsyncExampleWeightConfig,
example_weight_class: ExampleWeight,
avg_num_examples: int = 1,
avg_staleness: int = 1,
) -> None:
"""Test that all weight computation works as expected"""
max_num_examples = 10000
max_staleness = 10000
cutoff = 5000
value_after_cutoff = 0.001
exponent = 0.5
# dict below tells us how to initialize weight object for different
# staleness weight types
staleness_weight_configs = [
ConstantStalenessWeightConfig(),
ThresholdStalenessWeightConfig(
cutoff=cutoff, value_after_cutoff=value_after_cutoff
),
PolynomialStalenessWeightConfig(exponent=exponent),
]
for staleness_weight_config in staleness_weight_configs:
staleness_weight_obj = instantiate(staleness_weight_config)
# for 10 random integers
for _ in range(10):
num_examples = np.random.randint(1, max_num_examples)
staleness = np.random.randint(1, max_staleness)
staleness_weight = staleness_weight_obj.weight(staleness)
example_weight_config.avg_num_examples = avg_num_examples
example_weight_obj = instantiate(example_weight_config)
example_weight = example_weight_obj.weight(num_examples)
expected_combined_weight = example_weight * staleness_weight
combined_weight_object = instantiate(
AsyncWeightConfig(
example_weight=example_weight_config,
staleness_weight=staleness_weight_config,
)
)
combined_weight = combined_weight_object.weight(
num_examples=num_examples, staleness=staleness
)
assertEqual(expected_combined_weight, combined_weight)
``` |
{
"source": "JohnlNguyen/multihead-siamese-nets",
"score": 3
} |
#### File: multihead-siamese-nets/tests/test_similarity.py
```python
import numpy as np
import tensorflow as tf
from models.lstm import manhattan_similarity
class TestSimilarity(tf.test.TestCase):
def testManhattanSimilaritySame(self):
with self.test_session() as test_session:
x1 = np.array([[1., 1.]])
x2 = np.array([[1., 1.]])
siamese_lstm_model = manhattan_similarity(x1, x2)
actual_output = test_session.run(siamese_lstm_model)
correct_output = [1.]
self.assertEqual(actual_output, correct_output)
def testSimilarity2D(self):
with self.test_session() as test_session:
x1 = np.array([[1., 1.], [1., 1.]])
x2 = np.array([[1., 1.], [1., 1.]])
siamese_lstm_model = manhattan_similarity(x1, x2)
actual_output = test_session.run(siamese_lstm_model)
correct_output = [[1.], [1.]]
self.assertAllEqual(actual_output, correct_output)
``` |
{
"source": "JohnlNguyen/quora-question-pairs",
"score": 3
} |
#### File: source/code/word2vec.py
```python
import pandas as pd
import gensim
import logging
import time
import numpy as np
from sklearn.metrics import log_loss
import matplotlib.pyplot as plt
data = pd.read_csv("train.csv")
#################################
#########Data Cleaning###########
#################################
#remove punctuation
def string_process(str_in):
"""
    Change a sentence into lowercase and remove punctuation
    :param str_in (str): the sentence to process
:return: str: the processed sentence
"""
punc = '?,!.()\'":'
str_out = str_in.lower()
for i in punc:
str_out = str_out.replace(i, " ")
return str_out
data_question = data[['question1','question2','is_duplicate']]
#find problematic rows
drop_rows = []
for i in range(len(data_question.question2)):
if type(data_question.question2[i]) == float:
drop_rows.append(i)
for i in range(len(data_question.question1)):
if type(data_question.question1[i]) == float:
drop_rows.append(i)
#create a new copy for modification
new_data_df = data_question.copy()
#drop problematic rows
new_data_df.drop(new_data_df.index[drop_rows], inplace=True)
#remove punctuation
new_data_df.question1 = new_data_df.question1.apply(string_process)
new_data_df.question2 = new_data_df.question2.apply(string_process)
#split words in a sentence (also use this for word2vec section)
que1 = [i.split() for i in new_data_df.question1]
que2 = [i.split() for i in new_data_df.question2]
#build sentence base, question 1 combines with question 2
que = que1 + que2
#################################
###########Build Model###########
#################################
start = time.time()
model = gensim.models.Word2Vec(que, size=100, window=5, min_count=5, workers=4)
end = time.time()
print('Run Time:', end-start)
#################################
########Get predicted y##########
#################################
from progressbar import ProgressBar
bar = ProgressBar()
similarity_rate = 0.1
top_pick_num = 10
overlap_score_model = []
for q1, q2 in bar(zip(que1,que2)):
score1 = 0
score2 = 0
#handle score 1
for i in q1:
try:
check_list_q1 = model.most_similar(positive=[i])
picked_q1 = [i[0] for i in check_list_q1 if i[1] >= similarity_rate]
if len(picked_q1) <= top_pick_num:
selected_q1 = picked_q1
else:
selected_q1 = picked_q1[0:top_pick_num]
for i in selected_q1:
if i in q2:
score1 += 1
except:
score1 = 0
#handle score 2
for i in q2:
try:
check_list_q2 = model.most_similar(positive=[i])
picked_q2 = [i[0] for i in check_list_q2 if i[1] >= similarity_rate]
if len(picked_q2) <= top_pick_num:
selected_q2 = picked_q2
else:
selected_q2 = picked_q2[0:top_pick_num]
for i in selected_q2:
if i in q1:
score2 += 1
except:
score2 = 0
overlapping_model = (score1 + score2)/(len(q1) + len(q2))
overlap_score_model.append(overlapping_model)
#################################
#######Calculate Accuracy########
#################################
def cal_accuracy_model(thr):
predicted_model = list(((np.array(overlap_score_model) - thr) > 0) * 1)
accuracy_model = np.sum(predicted_model == new_data_df.is_duplicate)/len(predicted_model)
return accuracy_model
#get accuracy (testing different thresholds)
accuracy_thr_model = [cal_accuracy_model(i) for i in list(np.arange(0,1,0.1))]
#################################
############Log Loss#############
#################################
#using threshold = 0.5
predicted_model = list(((np.array(overlap_score_model) - 0.5) > 0) * 1)
true = list(new_data_df['is_duplicate'])
print(log_loss(true, predicted_model))
#################################
###Plot Accuracy vs Threshold####
#################################
x = list(np.arange(0,1,0.1))
plt.figure(figsize=(10,5))
plt.plot(x, accuracy_thr_model)
plt.title('Accuracy vs Threshold with word2vec implementation')
plt.xlabel('Threshold')
plt.ylabel('Accuracy')
plt.grid()
plt.show()
``` |
{
"source": "JohnlNguyen/semantic_code_search",
"score": 3
} |
#### File: semantic_code_search/preproc/json_to_seq2seq.py
```python
from __future__ import print_function
import sys, json
def to_seq2seq(json_file, seq_input, seq_output):
dataset = json.load(open(json_file))
with open(seq_input, 'w') as f_inp, open(seq_output, 'w') as f_out:
for example in dataset:
f_inp.write(' '.join(example['intent_tokens']) + '\n')
f_out.write(' '.join(example['snippet_tokens']) + '\n')
if __name__ == '__main__':
json_file = sys.argv[1]
seq_input = sys.argv[2]
seq_output = sys.argv[3]
to_seq2seq(json_file, seq_input, seq_output)
```
#### File: semantic_code_search/usr_dir/semantic_search.py
```python
from pathlib import Path
import os
import pandas as pd
import tensorflow as tf
from six import StringIO
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators.function_docstring import GithubFunctionDocstring
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import translate
from tensor2tensor.data_generators.extract_raw_data import extract_data
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
from nltk.tokenize import RegexpTokenizer
from sklearn.model_selection import train_test_split
from usr_dir.utils import read_from_file
_CONALA_TRAIN_DATASETS = [
[
"gs://conala/",
("train/conala-train.intent",
"train/conala-train.code")
],
[
"gs://conala/",
("mined/conala-train-mined.intent", "mined/conala-train-mined.code")
],
]
@registry.register_problem
class SemanticSearch(text_problems.Text2TextProblem):
"""
"""
def __init__(self, was_reversed=False, was_copy=False):
super(SemanticSearch, self).__init__(
was_reversed=False, was_copy=False)
@property
def vocab_type(self):
return text_problems.VocabType.SUBWORD
@property
def base_url(self):
return "gs://conala"
@property
def test_file(self):
return '{}/{}'.format(self.base_url, "conala-test.json"), "conala-test.json"
@property
def file_names(self):
return [
"conala-mined.jsonl",
"conala-train.json"
]
@property
def pair_files_list(self):
"""
This function returns a list of (url, file name) pairs
"""
return [
('{}/{}'.format(self.base_url, name),
name)
for name in self.file_names
]
@property
def is_generate_per_split(self):
return True
@property
def approx_vocab_size(self):
        return 2 ** 14  # ~16k
@property
def max_samples_for_vocab(self):
return int(3.5e5)
@property
def oov_token(self):
return "UNK"
@classmethod
def github_data(cls, data_dir, tmp_dir, dataset_split):
"""
Using data from function_docstring problem
"""
github = GithubFunctionDocstring()
return github.generate_samples(data_dir, tmp_dir, dataset_split)
def maybe_download_conala(self, tmp_dir):
all_files = [
generator_utils.maybe_download(tmp_dir, file_name, uri)
for uri, file_name in self.pair_files_list
]
return all_files
def maybe_split_data(self, tmp_dir, extracted_files, use_mined=True):
train_file = os.path.join(
tmp_dir, 'conala-joined-prod-train.json' if use_mined else 'conala-prod-train.json')
valid_file = os.path.join(
tmp_dir, 'conala-joined-prod-valid.json' if use_mined else 'conala-prod-valid.json')
if tf.gfile.Exists(train_file) or tf.gfile.Exists(valid_file):
tf.logging.info("Not splitting, file exists")
else:
if use_mined:
df = self.join_mined_and_train(tmp_dir, extracted_files)
else:
train_path = os.path.join(tmp_dir, 'conala-train.json.prod')
assert tf.gfile.Exists(train_path)
df = pd.read_json(train_path)
train, valid = train_test_split(
df, test_size=0.10, random_state=42)
train[['intent_tokens', 'snippet_tokens']].to_json(train_file)
valid[['intent_tokens', 'snippet_tokens']].to_json(valid_file)
return train_file, valid_file
def join_mined_and_train(self, tmp_dir, extracted_files):
df = pd.DataFrame([])
for extracted_file in extracted_files:
if 'test' not in extracted_file:
file_path = os.path.join(tmp_dir, extracted_file)
df = df.append(pd.read_json(file_path),
ignore_index=True, sort=False)
return df
def generate_samples(self, data_dir, tmp_dir, dataset_split):
"""A generator to return data samples.Returns the data generator to return.
Args:
data_dir: A string representing the data directory.
          tmp_dir: A string representing the temporary directory and is
used to download files if not already available.
dataset_split: Train, Test or Eval.
Yields:
Each element yielded is of a Python dict of the form
{"inputs": "STRING", "targets": "STRING"}
"""
extracted_files, train_filename, valid_filename = self.process_files(
tmp_dir)
if dataset_split == problem.DatasetSplit.TRAIN:
df = pd.read_json(train_filename)
for row in df.itertuples():
yield self.get_row(row)
elif dataset_split == problem.DatasetSplit.EVAL:
df = pd.read_json(valid_filename)
for row in df.itertuples():
yield self.get_row(row)
def eval_metrics(self):
return [
metrics.Metrics.ACC,
metrics.Metrics.APPROX_BLEU
]
def get_row(self, row):
return {"inputs": " ".join(row.intent_tokens),
"targets": " ".join(row.snippet_tokens)}
def process_files(self, tmp_dir):
self.maybe_download_conala(tmp_dir)
extracted_files = extract_data(tmp_dir, False)
train_filename, valid_filename = self.maybe_split_data(
tmp_dir, extracted_files, use_mined=False)
return extracted_files, train_filename, valid_filename
@registry.register_problem
class SemanticSearchAst(SemanticSearch):
"""
Structure this problem as a translate problem
"""
@property
def is_generate_per_split(self):
return False
@property
def vocab_type(self):
return text_problems.VocabType.SUBWORD
def generate_samples(self, data_dir, tmp_dir, dataset_split):
extracted_files, train_filename, valid_filename = self.process_files(
tmp_dir)
intent_path = os.path.join(tmp_dir, 'conala-train.json.prod')
ast_path = os.path.join(tmp_dir, 'conala-train-ast.txt')
assert tf.gfile.Exists(intent_path) and tf.gfile.Exists(ast_path)
intents = pd.read_json(intent_path).intent_tokens
ast_nodes = read_from_file(ast_path)
for intent_tokens, ast_node in zip(intents, ast_nodes):
yield {"inputs": " ".join(intent_tokens), "targets": ast_node}
```
#### File: semantic_code_search/usr_dir/utils.py
```python
from pathlib import Path
import os
import pandas as pd
import tensorflow as tf
import ast
from six import StringIO
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators.function_docstring import GithubFunctionDocstring
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import translate
from tensor2tensor.data_generators.extract_raw_data import extract_data
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
from nltk.tokenize import RegexpTokenizer
from sklearn.model_selection import train_test_split
def write_to_file(file_name, data):
with open(file_name, 'w+') as f:
for v in data:
f.write(v + "\n")
def read_from_file(file_name):
with open(file_name, 'r') as f:
return list(f)
def generate_vocab(tmp_dir, extracted_files, vocab_type=text_problems.VocabType.TOKEN):
    # Originally written as a method; the vocab type is passed in explicitly
    # here instead of referencing the undefined `self.vocab_type`.
    if vocab_type != text_problems.VocabType.TOKEN:
tf.logging.info(
"Not generating vocab file, vocab type is not token")
return
vocab_file = os.path.join(tmp_dir, "vocab.semantic_search.tokens")
if tf.gfile.Exists(vocab_file):
tf.logging.info("Skipping vocab generation, vocab file exists")
return
vocab = ["UNK"]
for file in extracted_files:
file_path = os.path.join(tmp_dir, file)
assert tf.gfile.Exists(file_path)
df = pd.read_json(file_path)
for row in df[['intent_tokens', 'snippet_tokens']].itertuples():
vocab.extend(row.intent_tokens)
vocab.extend(row.snippet_tokens)
vocab = set(vocab)
    write_to_file(vocab_file, vocab)
def tokenize_code(text: str):
    """A very basic procedure for tokenizing code strings."""
    return RegexpTokenizer(r'\w+').tokenize(text)
def generate_linearized_ast(code_snippet):
return ast.dump(ast.parse(code_snippet).body[0], annotate_fields=False)
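# --- Illustrative usage sketch (not part of the original module) ---
# Exercises the two helpers above; the exact AST dump text varies with the
# Python version, so the expected outputs shown in comments are approximate.
if __name__ == "__main__":
    snippet = "x = a + b"
    print(tokenize_code(snippet))            # -> ['x', 'a', 'b']
    print(generate_linearized_ast(snippet))  # -> e.g. "Assign([Name('x', ...)], BinOp(...))"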
``` |
{
"source": "john-lock/chatter",
"score": 2
} |
#### File: john-lock/chatter/app.py
```python
import os
from flask import Flask, render_template, request, json
from flask_cors import CORS
from pusher import pusher
# import simplejson
app = Flask(__name__)
cors = CORS(app)
app.config["CORS_HEADERS"] = "Content-Type"
PUSHER_APP_ID = os.environ["PUSHER_APP_ID"]
PUSHER_APP_KEY = os.environ["PUSHER_APP_KEY"]
PUSHER_APP_SECRET = os.environ["PUSHER_APP_SECRET"]
pusher = pusher_client = pusher.Pusher(
app_id=PUSHER_APP_ID,
key=PUSHER_APP_KEY,
secret=PUSHER_APP_SECRET,
cluster="eu",
ssl=True,
)
@app.route("/")
def index():
return render_template("index.html", PUSHER_APP_KEY=PUSHER_APP_KEY)
@app.route("/admin")
def admin():
return render_template("admin.html", PUSHER_APP_KEY=PUSHER_APP_KEY)
@app.route("/new/guest", methods=["POST"])
def guestUser():
data = request.json
pusher.trigger(
u"general-channel",
u"new-guest-details",
{"name": data["name"], "email": data["email"]},
)
return json.dumps(data)
@app.route("/pusher/auth", methods=["POST"])
def pusher_authentication():
auth = pusher.authenticate(
channel=request.form["channel_name"], socket_id=request.form["socket_id"]
)
return json.dumps(auth)
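# Illustrative note (not from the original repo): the app requires the three
# PUSHER_* environment variables read above to be set before it will start, e.g.
#   export PUSHER_APP_ID=<id> PUSHER_APP_KEY=<key> PUSHER_APP_SECRET=<secret>
#   python app.py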
if __name__ == "__main__":
app.run()
``` |
{
"source": "JohnLockwood/100-days-of-python",
"score": 3
} |
#### File: package_demo/package1/pathprint.py
```python
import sys
import pprint
def print_path():
"""Pretty print the system path"""
print("Printing the search path:")
pprint.pprint(sys.path)
```
#### File: day-13/DictionaryCompare/first_python_file.py
```python
import numpy as np
import numpy.typing as nptype
l = [x for x in range(1, 11)]
n1 = 5
n3 = 3/5
n4 = 3//5
np_arr = np.array([x for x in range(1, 11)])
message = "Hello world"
print(np_arr)
print(message)
def get_np_array():
"""Return a 4x2 numpy array"""
arr = np.arange(8.).reshape(4,2)
return arr
def documented_fn():
"""I am some function documentation. Let's see how I look in Sphinx!"""
pass
np_arr3 = get_np_array()
``` |
{
"source": "JohnLockwood/Goalboost",
"score": 2
} |
#### File: blueprints/api/timer_resource.py
```python
from flask import jsonify, request, Response
import http.client
from flask.ext.login import current_user
from flask.ext.restful.reqparse import RequestParser
from goalboost.blueprints.api.restful_resource import RestfulResource
from goalboost.blueprints.auth.token_auth import httpBasicAuth
from goalboost.model.timer_models import TimerDAO, TimerFormatter, Timer
class TimerResource(RestfulResource):
@httpBasicAuth.login_required
def get_one(self, id):
parser = RequestParser()
# Look only in the querystring
parser.add_argument('user', location='args', default=None)
args = parser.parse_args(request)
dao = TimerDAO()
timer = dao.get(id)
as_dict = TimerFormatter().model_to_dict(timer)
resp = jsonify(as_dict)
return resp
# This will likely be the most complicated endpoint, since we need
# to decide what query strings to support. See also _get_args.
@httpBasicAuth.login_required
def get_many(self):
args = self._get_args(request)
# For now, just get everything for the current user:
dao = TimerDAO()
timers = dao.get_all_timers_for_user(current_user.id)
formatter = TimerFormatter()
timers_payload = [formatter.model_to_dict(timer) for timer in timers]
return jsonify(dict(timers=timers_payload))
#as_dict = TimerFormatter().model_to_dict(timer)
# Just an example for now -- we can build this out to include
# filter by date, by user other than current (need more roles / authentication work)
# etc.
def _get_args(self, request):
parser = RequestParser()
parser.add_argument('user', location='args', default=None)
args = parser.parse_args(request)
return args
    # Does this implementation become any cleaner without MongoEngine? This seems like a long way around.
@httpBasicAuth.login_required
def post(self):
api_v1_root = '/api/v1'
timer_new = TimerFormatter().dict_to_model(Timer, request.json)
dao = TimerDAO()
if timer_new.id is not None:
timer = dao.get(timer_new.id)
timer.update_attributes(timer_new)
timer_new = timer
dao.put(timer_new)
resp = jsonify(TimerFormatter().model_to_dict(timer_new))
#resp.headers["Location"] = api_v1_root + self.root + "/" + id
resp.headers["Location"] = self.make_location(request.url, timer_new.id)
resp.status_code = http.client.CREATED
return resp
# TODO WRONG RETURN
@httpBasicAuth.login_required
def put(self, id):
#api_v1_root = '/api/v1'
timer_new = TimerFormatter().dict_to_model(Timer, request.json)
dao = TimerDAO()
timer = dao.get(id)
if timer is not None:
timer.update_attributes(timer_new)
dao.put(timer)
id = str(timer.id)
resp = jsonify(dict(id=id))
#resp.headers["Location"] = api_v1_root + self.root + "/" + id
resp.headers["Location"] = self.make_location(request.url, id)
resp.status_code = http.client.OK
return resp
@httpBasicAuth.login_required
def delete(self, id):
api_v1_root = '/api/v1'
dao = TimerDAO()
count_deleted = dao.delete(id)
if count_deleted == 1:
return Response(status=204)
else:
return Response(status=404)
def make_location(self, request_url, id):
location = request_url
if not location.endswith("/"):
location = location + "/"
return location + str(id)
```
#### File: Goalboost/goalboost/__init__.py
```python
from flask import Flask
from flask_mail import Mail
import os
import logging
from logging import FileHandler
from config import config
from goalboost.blueprints import api
from goalboost.blueprints.auth import init_flask_security
from goalboost.blueprints.auth.auth_controllers import bp_auth
from goalboost.blueprints.index.index_controllers import bp_index
from goalboost.blueprints.timer.controllers_timer import bp_timer
from goalboost.blueprints.api.api_controllers import init_api
from goalboost.blueprints.api.api_v1_controllers import v1_api
from goalboost.model import init_db, db
app = Flask(__name__)
def create_app(config_name):
global app
app.config.from_object(config[config_name])
# Setup database
# Currently inits mongoDB
init_db(app)
# Todo make intializing blueprints consistent
app.register_blueprint(bp_index)
app.register_blueprint(bp_auth)
app.register_blueprint(bp_timer)
app.register_blueprint(v1_api)
init_api(app)
file_handler = FileHandler("flask.log")
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
config[config_name].init_app(app)
#
init_flask_security(app)
mail = Mail(app)
return app
create_app(os.environ.get("GOALBOOST_CONFIGURATION") or "development")
```
#### File: goalboost/model/__init__.py
```python
from flask.ext.mongoengine import MongoEngine
db = MongoEngine()
def init_db(app):
global db
db.init_app(app)
```
#### File: goalboost/model/model_formatter.py
```python
from json import dumps, loads
from bson import ObjectId
from goalboost.model import db
class ModelFormatter(object):
# A default base class implementation.
def model_to_dict(self, object_as_model, include=None, exclude=None):
as_dict = loads(object_as_model.to_json())
if exclude is not None:
            subset = {key: value for key, value in as_dict.items() if key not in exclude}
return subset
elif include is not None:
subset = {key: value for key, value in as_dict.items() if key in include}
return subset
else:
return as_dict
def dict_to_model(self, class_name, object_as_dict):
return class_name.from_json(dumps(object_as_dict))
# Adds a key / value pair to model_dict. If the property on model_object
# identified by the key is None, add None as value, otherwise stringify
# the value
def add_string_property(self, key, model_object, model_dict):
val = getattr(model_object, key)
if val is None:
model_dict[key] = None
else:
model_dict[key] = str(val)
# Add a value that's not a string and that shouldn't be converted to
# one. E.g., boolean, number etc.
def add_property(self, key, model_object, model_dict):
model_dict[key] = getattr(model_object, key)
# For List fields
def add_list_property(self, key, object_as_model, as_dict):
value = getattr(object_as_model, key)
if value is None:
as_dict[key] = None
else:
as_dict[key] = [item for item in value] # Convert to python list
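# Illustrative usage sketch (not part of the original module); `user` stands in
# for any MongoEngine document instance with `_id`, `email`, and `password` fields.
#
#   formatter = ModelFormatter()
#   public = formatter.model_to_dict(user, include=["_id", "email"])  # keep only these keys
#   redacted = formatter.model_to_dict(user, exclude=["password"])    # drop these keys
#   clone = formatter.dict_to_model(type(user), public)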
```
#### File: goalboost/model/mongomint.py
```python
from bson import ObjectId
class MongoMintDocument(object):
"""Create a MongoMintDocument object.
pymongo_client_object - a MongoClient instance
collection_name - a name for the collection we want to validate on / operate against
database (optional, default="goalboost"), a name for the database where the collection will live
"""
def __init__(self, pymongo_client_object, collection_name, database="goalboost"):
self.connection = pymongo_client_object
self.database = database
self.collection_name = collection_name
self._clear_validation_errors()
"""upsert - easy single document save.
If validate generates no error, then insert a new document or update
the existing one, returning true.
If validate did return an error, return false. In that case. self.errors can
be inspected to find what happened.
"""
def upsert(self, model_dictionary):
if self._is_valid(model_dictionary):
if "_id" in model_dictionary: # Update case
self.collection.replace_one( \
filter = {"_id" : model_dictionary["_id"]}, replacement=model_dictionary)
else: # Insert case
self.collection.insert_one(model_dictionary)
return True
else:
return False
"""find_by_id - Return a single document by id or None, accepting either string or ObjectId as argument"""
# Always create an ObjectId, correctly handles both string case and ObjectId case
def find_by_id(self, id):
return self.collection.find_one({"_id": ObjectId(id) })
"""collection (property)
Easily use the underlying collection to use native pymongo methods, e.g. drop, find_one, etc.
"""
@property
def collection(self):
return self.connection[self.database][self.collection_name]
"""validate (optional override allows you to provide pre-save data validation
To implement if needed, add one string per validation failure to self.errors.
"""
def validate(self, model_dictionary):
pass
# Private methods
def _clear_validation_errors(self):
self.errors = []
def _is_valid(self, model_dictionary):
self._clear_validation_errors()
self.validate(model_dictionary)
return len(self.errors) == 0
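# Illustrative usage sketch (not part of the original module); assumes pymongo
# and a reachable MongoDB instance. `TimerDoc`, its validation rule, and
# `some_id` are hypothetical names used only for this example.
#
#   from pymongo import MongoClient
#
#   class TimerDoc(MongoMintDocument):
#       def validate(self, model_dictionary):
#           if "seconds" not in model_dictionary:
#               self.errors.append("seconds is required")
#
#   timers = TimerDoc(MongoClient(), "timers")
#   timers.upsert({"seconds": 42})     # True: passes validation and is inserted
#   timers.upsert({"notes": "oops"})   # False: inspect timers.errors for the reason
#   doc = timers.find_by_id(some_id)   # accepts either a string or an ObjectId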
```
#### File: model/queries/user_account.py
```python
from goalboost.model import auth_models
class UserAccountQueries:
def __init__(self):
self.users = auth_models.User()._get_collection()
# Todo Error handling, why is list none, etc.?
def get_users_for_account(self, account):
pass
#
# users = []
# account = self.accounts.find_one({"name": account})
# # Todo Better error handling -- why is it none?
# if(account is None):
# return users
# # account is good at this point
# # print(account["_id"])
# return list(self.users.find({"account": account["_id"]}))
```
#### File: test/integration/test_auth.py
```python
from unittest import TestCase
from goalboost.model.timer_models import Timer
from test.common.test_helper import TestObjects
from goalboost.blueprints.auth import can_access_user_owned_resource
class TestResourceAccess(TestCase):
test_objects = TestObjects()
def test_user_can_access_their_own_resource(self):
test_objects = TestObjects()
test_user = test_objects.get_test_user()
timer = Timer(notes="More testing, boss", user=test_user)
timer.save()
assert(can_access_user_owned_resource(test_user, timer))
timer.delete()
def test_account_admin_can_access_resource_if_account_same(self):
test_objects = TestObjects()
test_user = test_objects.get_test_user()
timer = Timer(notes="More testing, boss", user=test_user)
timer.save()
assert(can_access_user_owned_resource(test_user, timer))
timer.delete()
def test_account_admin_cannot_access_resource_if_account_different(self):
test_objects = TestObjects()
test_user = test_objects.get_test_user()
timer = Timer(notes="More testing, boss", user=test_user)
timer.save()
assert(can_access_user_owned_resource(test_user, timer))
timer.delete()
```
#### File: unit/model/test_formatters.py
```python
from datetime import datetime
from unittest import TestCase
from goalboost.model.auth_models import UserModelFormatter, User
from goalboost.model.model_formatter import ModelFormatter
from goalboost.model.timer_models import Timer, TimerFormatter
from test.common.test_helper import TestObjects
from json import dumps
class TestModelFormatter(TestCase):
def test_can_dump_user(self):
user = TestObjects().get_test_user()
formatter = ModelFormatter()
model_as_dict = formatter.model_to_dict(user, include=["_id", "email", "account"])
assert("password" not in model_as_dict)
assert("_id" in model_as_dict)
model_as_dict = formatter.model_to_dict(user, include=["_id", "email", "account", "password"])
assert("password" in model_as_dict)
#assert(isinstance(model_as_dict["_id"], str))
def test_can_load_user(self):
user = TestObjects().get_test_user()
formatter = ModelFormatter()
model_as_dict = formatter.model_to_dict(user, include=["_id", "email", "account"])
model = formatter.dict_to_model(User, model_as_dict)
assert(model.id == user.id)
assert(model.email == user.email)
class TestUserModelFormatter(TestCase):
def test_can_convert_user_to_dict(self):
user = TestObjects().get_test_user()
formatter = UserModelFormatter()
user_dict = formatter.model_to_dict(user)
# This all passes because our custom implementation takes care of these
# Extended details
assert(user_dict["account"]["name"] == user.account.name)
assert(user_dict["confirmed_at"] == None)
assert("password" not in user_dict)
# Important!!! make sure we can convert to JSON:
as_json = dumps(user_dict)
assert(isinstance(as_json, str))
def test_can_convert_user_to_dict_using_model_formatter(self):
user = TestObjects().get_test_user()
formatter = ModelFormatter()
user_dict = formatter.model_to_dict(user)
# This won't pass because we're using base class implementation
# assert(user_dict["account"]["name"] == user.account.name)
#assert(user_dict["confirmed_at"] == None)
# Important!!! make sure we can convert to JSON:
as_json = dumps(user_dict)
assert(isinstance(as_json, str))
class TestTimerModelFormatter(TestCase):
def test_can_convert_timer_to_dict(self):
user = TestObjects().get_test_user()
timer = Timer(notes="Just a test timer", user=user, tags=["Unit Tests"])
tf = TimerFormatter()
timer_entity_as_dict = tf.model_to_dict(timer)
assert(timer_entity_as_dict is not None)
assert(timer_entity_as_dict["notes"] == timer.notes)
def test_can_dump_and_load_timer(self):
user = TestObjects().get_test_user()
timer = Timer(notes="Just a test timer", user=user, tags=["Unit Tests"], seconds = 22, running = True)
timer.save()
tf = TimerFormatter()
timer_entity_as_dict = tf.model_to_dict(timer)
timer.delete()
timer2 = tf.dict_to_model(Timer, timer_entity_as_dict)
# This won't pass, there are subtle, trivial differences in datetimes once dates have been serialized
#assert(timer.lastRestart == timer2.lastRestart)
#assert(timer.dateEntered == timer2.dateEntered)
assert(timer.tags == timer2.tags)
assert(timer.running == timer2.running)
assert(timer.seconds == timer2.seconds)
assert(timer.notes == timer2.notes)
assert(timer.user == timer2.user)
assert(timer.to_json() == timer2.to_json())
class TestTimerFormatter(TestCase):
def test_get_week_ending_date(self):
formatter = TimerFormatter()
assert(formatter.get_week_ending_date(2016,0) == "01/02/2016")
assert(formatter.get_week_ending_date(2015,0) == "01/03/2015")
assert(formatter.get_week_ending_date(2014,0) == "01/04/2014")
assert(formatter.get_week_ending_date(2001,0) == "01/06/2001")
assert(formatter.get_week_ending_date(2005,0) == "01/01/2005")
assert(formatter.get_week_ending_date(2006,0) == "01/07/2006")
assert(formatter.get_week_ending_date(2015,7) == "02/21/2015")
assert(formatter.get_week_ending_date(2016,11) == "03/19/2016")
``` |
{
"source": "johnlockwood-wf/furious",
"score": 2
} |
#### File: furious/example/context_complex.py
```python
import logging
from google.appengine.api import memcache
import webapp2
class ContextComplexHandler(webapp2.RequestHandler):
"""Demonstrate using a Context to batch insert a
group of furious tasks."""
def get(self):
from furious.async import Async
from furious import context
# Create a new furious Context.
with context.new(callbacks={'internal_vertex_combiner': l_combiner,
'leaf_combiner': l_combiner,
'success': example_callback_success}) as ctx:
# "Manually" instantiate and add an Async object to the Context.
async_task = Async(
target=example_function, kwargs={'first': 'async'})
ctx.add(async_task)
logging.info('Added manual job to context.')
            # instantiate and add an Async whose function creates another Context,
# enabling extra fan-out of a job
async_task = Async(
target=make_a_new_context_example, kwargs={'extra': 'async'})
ctx.add(async_task)
logging.info('Added sub context')
# Use the shorthand style, note that add returns the Async object.
for i in xrange(25):
ctx.add(target=example_function, args=[i])
logging.info('Added job %d to context.', i)
            # Instantiate and add an Async whose function creates another Async,
# enabling portions of the job to be serial
async_task = Async(
target=make_a_new_async_example, kwargs={'second': 'async'})
ctx.add(async_task)
            logging.info('Added an async that returns an async')
# When the Context is exited, the tasks are inserted (if there are no
# errors).
logging.info('Async jobs for context batch inserted.')
self.response.out.write('Successfully inserted a '
'group of Async jobs with Furious:{0}'.format(ctx.id))
def l_combiner(results):
return reduce(lambda x, y: x + y, results, 0)
def iv_combiner(results):
return results
def example_callback_success(id, result):
memcache.set("Furious:{0}".format(id), "done by callback")
def example_function(*args, **kwargs):
"""This function is called by furious tasks to demonstrate usage."""
logging.info('example_function executed with args: %r, kwargs: %r',
args, kwargs)
return l_combiner(args)
def make_a_new_async_example(*args, **kwargs):
from furious.async import Async
async_task = Async(
target=example_function, args=[500])
return async_task
def make_a_new_context_example(*args, **kwargs):
from furious import context
ctx = context.Context(callbacks={'internal_vertex_combiner': l_combiner,
'leaf_combiner': l_combiner,
'success': example_callback_success})
# Use the shorthand style, note that add returns the Async object.
for i in xrange(2):
ctx.add(target=example_function, args=[i])
logging.info('Added job %d to context.', i)
return ctx
```
#### File: tests/marker_tree/test_marker.py
```python
import unittest
from google.appengine.ext import testbed
from mock import patch
class TestMarker(unittest.TestCase):
def setUp(self):
import os
import uuid
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_taskqueue_stub()
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub()
# Ensure each test looks like it is in a new request.
os.environ['REQUEST_ID_HASH'] = uuid.uuid4().hex
def tearDown(self):
self.testbed.deactivate()
@patch("furious.marker_tree.identity_utils.InvalidGroupId", autospec=True)
def test_async_to_marker(self, invalid_group_id):
from furious.marker_tree.marker import Marker
from furious.async import Async
task = Async(target=dir)
task.id = "1"
marker = Marker.from_async(task)
self.assertIsNone(marker.group_id)
def test_is_marker_leaf(self):
"""
Make sure a marker can report if it is a leaf marker
or not
"""
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
root_marker = Marker(id="polly")
for x in xrange(3):
root_marker.children.append(
Marker(id=leaf_persistence_id_from_group_id(
root_marker.id, x)))
originally_a_leaf_marker = root_marker.children[0]
sub_tree_marker = Marker(
id=originally_a_leaf_marker.id,
children=[Marker(id=leaf_persistence_id_from_group_id(
originally_a_leaf_marker.id, i))
for i in xrange(3)])
root_marker.children[0] = sub_tree_marker
still_leaf_marker = root_marker.children[1]
now_sub_tree_marker = root_marker.children[0]
self.assertTrue(still_leaf_marker.is_leaf())
self.assertFalse(now_sub_tree_marker.is_leaf())
def test_get_multi(self):
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
root_marker = Marker(id="freddy")
for x in xrange(3):
root_marker.children.append(
Marker(id=leaf_persistence_id_from_group_id(
root_marker.id, x)))
root_marker._persist_whole_graph()
markers = Marker.get_multi([child.id for child in
root_marker.children])
self.assertEqual(len(markers), 3)
markers = Marker.get_multi([root_marker.children[0].id,
root_marker.children[1].id, "foobar"])
self.assertEqual(len(markers), 2)
def test_get_marker_tree_leaves(self):
"""
Make sure all the leaves of a marker are returned
as expected from marker._list_of_leaf_markers()
"""
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
root_marker = Marker(id="polly")
for x in xrange(3):
root_marker.children.append(
Marker(id=leaf_persistence_id_from_group_id(
root_marker.id, x)))
originally_a_leaf_marker = root_marker.children[0]
sub_tree_marker = Marker(id=originally_a_leaf_marker.id,
children=[Marker(id=
leaf_persistence_id_from_group_id(originally_a_leaf_marker.id, i))
for i in xrange(3)])
root_marker.children[0] = sub_tree_marker
root_marker.persist()
leaves = root_marker._list_of_leaf_markers()
self.assertEqual(len(leaves), 5)
reloaded_root_marker = Marker.get(root_marker.id)
self.assertIsNotNone(reloaded_root_marker)
leaves = reloaded_root_marker._list_of_leaf_markers()
#no jobs run and updated, so there should be no
#leaves persisted yet
self.assertEqual(len(leaves), 0)
def test_individual_serialization(self):
"""
Make sure a marker with children as IDs
which is the state they would be in after loading
from the persistence layer, the children maintain
through serialization
"""
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
from furious.job_utils import encode_callbacks
from furious.tests.marker_tree import dummy_success_callback
marker = Marker.from_dict(
{'id': 'test', 'callbacks':
encode_callbacks({'success': dummy_success_callback})})
self.assertEqual(marker.id, 'test')
marker2 = Marker.from_dict(marker.to_dict())
self.assertEqual(marker2.to_dict(), marker.to_dict())
root_marker = Marker(id="fun")
children = []
for x in xrange(10):
children.append(
Marker(id=
leaf_persistence_id_from_group_id(
root_marker.id, x)))
root_marker.children = [marker.id for marker in children]
root_dict = root_marker.to_dict()
self.assertTrue('children' in root_dict.keys())
self.assertEqual(len(children), len(root_dict['children']))
for index, child_id in enumerate(root_dict['children']):
self.assertEqual(children[index].id, child_id)
reconstituted_root = Marker.from_dict(root_dict)
self.assertEqual(len(reconstituted_root.children),
len(reconstituted_root.children_to_dict()))
def test_graph_serialization(self):
"""
Make sure when a marker tree graph is serialized
(to_dict), it gets deserialized(from_dict) with
        all its children intact as Markers
"""
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
root_marker = Marker(id="jolly")
for x in xrange(3):
root_marker.children.append(
Marker(id=leaf_persistence_id_from_group_id(
root_marker.id, x)))
root_dict = root_marker.to_dict()
self.assertTrue('children' in root_dict.keys())
self.assertEqual(len(root_marker.children), len(root_dict['children']))
reconstituted_root = Marker.from_dict(root_dict)
self.assertIsInstance(reconstituted_root, Marker)
self.assertEqual(len(reconstituted_root.children),
len(root_marker.children))
self.assertEqual(len(reconstituted_root.children),
len(reconstituted_root.children_to_dict()))
for child in reconstituted_root.children:
self.assertIsInstance(child, Marker)
def test_get_group_id_from_group_id(self):
"""
Make sure all children can id their parent marker
when they have an independent id, but passed the parent
ID as a group_id
"""
from furious.marker_tree.marker import Marker
root_marker = Marker(id="polly")
for x in xrange(2):
root_marker.children.append(Marker(id=str(x),
group_id=root_marker.id))
for child in root_marker.children:
self.assertEqual(child.get_group_id(), "polly")
def test_get_group_id_from_leaf(self):
"""
Make sure all children can id their parent marker
when their id was created by prefixing with parent id
"""
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
root_marker = Marker(id="polly")
for x in xrange(3):
root_marker.children.append(
Marker(id=leaf_persistence_id_from_group_id(
root_marker.id, x)))
for child in root_marker.children:
self.assertEqual(child.get_group_id(), "polly")
def test_persist_marker_tree_graph(self):
"""
Make sure when a marker tree is persisted,
it only saves the non-leaf nodes and the
children properties contains only IDs
"""
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
root_marker = Marker(id="peanut")
for x in xrange(3):
root_marker.children.append(
Marker(id=leaf_persistence_id_from_group_id(
root_marker.id, x)))
root_marker.persist()
loaded_marker = Marker.get(root_marker.id)
self.assertIsNotNone(loaded_marker)
self.assertEqual(root_marker.id, loaded_marker.id)
self.assertEqual(root_marker.done, loaded_marker.done)
self.assertEqual(len(root_marker.children),
len(loaded_marker.children))
children_ids = [child.id for child in root_marker.children]
loaded_children_ids = loaded_marker.children
for index, idx in enumerate(children_ids):
self.assertEqual(idx, loaded_children_ids[index])
loaded_child_marker = Marker.get(root_marker.children[0].id)
self.assertIsNone(loaded_child_marker)
@patch('furious.context.get_current_context', auto_spec=True)
def test_persist_tree_after_tasks_inserted(self,
mock_get_current_context):
"""
        Make sure a marker tree isn't persisted (raises an exception)
        after the current context's tasks have
been inserted
"""
from furious.context.context import Context
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
from furious.marker_tree.exceptions import NotSafeToSave
a_context = Context(id="zebra")
a_context._tasks_inserted = True
mock_get_current_context.return_value = a_context
root_marker = Marker(id="zebra")
for x in xrange(3):
root_marker.children.append(
Marker(id=leaf_persistence_id_from_group_id(
root_marker.id, x)))
self.assertRaises(NotSafeToSave, root_marker.persist)
def test_persist_internal_node_marker(self):
"""
Make sure internal nodes are saved during the persistence
of a marker tree graph.
"""
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
root_marker = Marker(id="cracker")
for x in xrange(2):
root_marker.children.append(Marker(
id=str(x),
group_id=root_marker.id,
children=[
Marker(id=leaf_persistence_id_from_group_id(str(x), i))
for i in xrange(3)]
))
root_marker.persist()
internal_node1 = root_marker.children[0]
leaf_node2 = internal_node1.children[1]
loaded_internal = Marker.get(internal_node1.id)
self.assertIsNotNone(loaded_internal)
loaded_leaf = Marker.get(leaf_node2.id)
self.assertIsNone(loaded_leaf)
def test_persist_leaf_marker(self):
"""
Make sure a leaf marker is saved when it persists
itself but only during the update_done process.
        Make sure that a loaded leaf marker can id its parent
marker
"""
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
from furious.marker_tree.exceptions import NotSafeToSave
root_marker = Marker(id="heart")
for x in xrange(3):
root_marker.children.append(
Marker(id=leaf_persistence_id_from_group_id(
root_marker.id, x)))
leaf_marker = root_marker.children[0]
self.assertRaises(NotSafeToSave, leaf_marker.persist)
leaf_marker._update_done_in_progress = True
leaf_marker.persist()
loaded_marker = Marker.get(leaf_marker.id)
self.assertIsNotNone(loaded_marker)
self.assertIsInstance(loaded_marker, Marker)
        self.assertEqual(loaded_marker.get_group_id(), 'heart')
@patch('furious.marker_tree.marker.count_marked_as_done',
auto_spec=True)
@patch('furious.marker_tree.marker.count_update', auto_spec=True)
def test_update_done_of_leaf_travels_to_root_when_last(
self,
mock_count_update,
mock_count_marked_as_done):
"""
Make sure when all but one marker is done and it
runs an update_done, the processes will bubble
        up to the root marker and its update_done will be called.
How many times is Marker.update_done called?
                 2
                / \
               /   \
              3     3
             /|\   /|\
            1 1 1 1 1 1
Each leaf node calls itself once.
Each internal vertex is called during bubble_up_done
of each child.
The first time the root marker.update_done is run, the
right child node is not done
"""
mock_count_update.return_value = None
from furious.marker_tree.identity_utils import leaf_persistence_id_from_group_id
from furious.marker_tree.marker import Marker
from furious.tests.marker_tree import dummy_success_callback
from furious.tests.marker_tree import dummy_internal_vertex_combiner
from furious.tests.marker_tree import dummy_leaf_combiner
context_callbacks = {
'success': dummy_success_callback,
'internal_vertex_combiner': dummy_internal_vertex_combiner,
'leaf_combiner': dummy_leaf_combiner
}
root_marker = Marker(id="delve",
callbacks=context_callbacks)
for x in xrange(2):
root_marker.children.append(Marker(
id=str(x),
group_id=root_marker.id,
callbacks=context_callbacks,
children=[
Marker(id=leaf_persistence_id_from_group_id(
str(x), i)) for i in xrange(3)]
))
root_marker.persist()
with patch('furious.tests.marker_tree.'
'dummy_success_callback', auto_spec=True) \
as mock_success_callback:
with patch('furious.tests.marker_tree.'
'dummy_leaf_combiner', auto_spec=True) \
as mock_leaf_combiner:
with patch('furious.tests.marker_tree.'
'dummy_internal_vertex_combiner', auto_spec=True) \
as mock_internal_vertex_combiner:
mock_leaf_combiner.return_value = ["1"]
mock_internal_vertex_combiner.return_value = ["2"]
for internal_node in root_marker.children:
for leaf_node in internal_node.children:
leaf_node.done = True
leaf_node.result = 1
leaf_node.update_done(persist_first=True)
loaded_root_marker = Marker.get("delve")
self.assertTrue(loaded_root_marker.done)
self.assertEqual(mock_count_update.call_count, 14)
#9 is the number of nodes in the graph
self.assertEqual(mock_count_marked_as_done.call_count, 9)
#pretend a task was run again later on after
#the root had succeeded, it should only
#reach it's parent node and that should
#not bubble up
leaf_node = root_marker.children[0].children[1]
leaf_node.update_done(persist_first=True)
self.assertEqual(mock_count_update.call_count, 16)
mock_success_callback.assert_called_once_with("delve", ['2'])
#one for each non-leaf node
self.assertEqual(mock_internal_vertex_combiner.call_count, 3)
self.assertEqual(mock_leaf_combiner.call_count, 2)
``` |
{
"source": "Johnlon/integrated-circuit-tester",
"score": 3
} |
#### File: Johnlon/integrated-circuit-tester/reconnect.py
```python
import serial
import time
import queue
import tkinter as tk
def serialLoop(serialPortVar, responseHandler, queue):
    # serialPortVar is treated as a plain string here (originally a tkinter StringVar; the .get() calls below are commented out)
# queue is an instance of Queue class
ser = None
current = serialPortVar#.get()
while True:
try:
port = serialPortVar#.get()
if port != current:
current=port
ser.close()
ser = None
else:
if ser is None:
responseHandler("Connecting: %s\n" % port)
ser = serial.Serial(port, timeout = 0.05)
# ser.parity = "O"
# ser.bytesize = 8
# give arduino a chance to respond
time.sleep(2)
l = ser.readline()
while len(l) > 0:
line = l.decode("utf-8")
if line.startswith("READY"):
responseHandler("\n")
else:
responseHandler(line)
l = ser.readline()
# only send one line at a time then process all responses
if not queue.empty():
w = queue.get()
responseHandler("SEND: " + w.strip()+ "\n")
ser.write(w.encode("utf-8"))
ser.write("\n".encode("utf-8"))
# wait for some response
while ser.in_waiting==0:
time.sleep(0.05)
except BaseException as e:
if ser is not None:
responseHandler("Disconnecting: %s\n" % str(e))
ser.close()
ser = None
else:
responseHandler("No Connection: %s\n" % str(e))
time.sleep(1)
def resp(a):
print(a, end='')
q = queue.Queue()
q.put("?")
q.put("1")
port = "com6"
serialLoop(port, resp, q)
```
#### File: Johnlon/integrated-circuit-tester/serialMonitor.py
```python
from serial import Serial
import time
import string
from pathlib import Path
import sys
import threading
import time
import platform
from os import _exit
home = str(Path.home())
## compat
try:
import readline
except ImportError:
import pyreadline as readline
def loadLastSerialPortConf():
try:
with open(home + "/.ictester") as f:
content = f.readlines()
if len(content)>0:
return content[0].strip()
else:
None
except:
pass
def saveSerialPortConf(comNum):
with open(home + "/.ictester", "w") as f:
f.write(comNum)
def selectPort():
default = loadLastSerialPortConf()
print("serial port number [%s] ? " % default, end="")
sys.stdout.flush()
c = sys.stdin.readline().strip()
if c.strip() != "":
com = c
elif default:
com=default
else:
print("must select port")
return selectPort()
saveSerialPortConf(com)
if com.isdigit():
if (platform.system() == "Linux"):
port = '/dev/ttyS' + com
else:
port = 'com' + com
else:
port = com
print("port is " + port)
return port
class TesterInterface():
def _recvResponse(resp):
print("%s" % resp, end='')
sys.stdout.flush()
def __init__(self, *args, **kwargs):
self.responseHandler = None
self.ard = None
self.serialThreadRunning = False
def open(self, serialPort, responseHandler = _recvResponse):
self.responseHandler = responseHandler
self.responseHandler("opening " + serialPort + "\n")
if self.ard:
try:
self.ard.cancel_read()
self.ard.close()
except BaseException as e:
self.responseHandler(str(e))
time.sleep(1)
self.ard = Serial(serialPort, 9600, timeout=1)
self.ard.parity = "O"
self.ard.bytesize = 7
self.responseHandler("opened " + serialPort + "\n")
self.startResponseThread()
def close(self):
self.ard.close()
def _keyboardInput(self):
try:
while True:
line = sys.stdin.readline().strip()
if (line == "q"):
break
# write to arduino
if len(line) > 0:
self.writeToTester(line)
print("QUIT")
_exit(0)
except BaseException as x:
print("ERR READING STDIN\n")
print(x)
_exit(1)
def _readSerial(self):
self.serialThreadRunning = True
current = self.ard
while self.ard and self.ard == current:
msg = None
try:
msg = self.ard.readline()
msg = msg.decode("utf-8") # .strip()
except BaseException as e:
self.responseHandler("ERR: while reading from arduino : %s %s\n" % (type(e), str(e)))
if msg:
self.responseHandler("ERR: read '%s' + \n" % msg)
break
if msg and (len(msg) > 0):
self.responseHandler(msg)
self.serialThreadRunning = False
def startKeyboardThread(self):
if not self.ard:
raise Exception("Tester port not open")
# thread to read local keyboard input
input_thread = threading.Thread(target=self._keyboardInput)
input_thread.daemon = False
input_thread.start()
def startResponseThread(self):
if not self.ard:
raise Exception("Tester port not open")
# thread to read and print data from arduino
sinput_thread = threading.Thread(target=self._readSerial)
sinput_thread.daemon = True
sinput_thread.start()
def writeToTester(self, testcase):
try:
self.ard.write(testcase.encode("utf-8"))
self.ard.write("\n".encode("utf-8"))
except BaseException as x:
self.responseHandler("EXCEPTION : " + str(x))
# Support for CTRL-C needs main thread still running.
# This is actually a crucial step, otherwise all signals sent to your program will be ignored.
# Adding an infinite loop using time.sleep() after the threads have been started will do the trick:
def main():
com = selectPort()
tester = TesterInterface()
try:
tester.open(serialPort=com)
tester.startKeyboardThread()
while True:
if not tester.serialThreadRunning:
print("reopening..")
try:
tester.close()
tester.open(serialPort=com)
tester.startResponseThread()
except BaseException as e:
pass
#print("error while reopening", e)
time.sleep(1)
except KeyboardInterrupt:
_exit(1)
except BaseException as x:
try:
print("ERR3 %s" % x)
tester.close()
sys.stdin.close()
_exit(1)
except:
_exit(1)
if __name__ == '__main__':
main()
``` |
{
"source": "johnlouieabrina-acn/ctwrap",
"score": 3
} |
#### File: ctwrap/ctwrap/handler.py
```python
from pathlib import Path
import warnings
import textwrap
from typing import Dict, Any, Optional, Union
# multiprocessing
import multiprocessing as mp
from multiprocessing import queues as mpq
from multiprocessing import synchronize as mps
import queue # imported for using queue.Empty exception
try:
import ruamel_yaml as yaml
except ImportError:
from ruamel import yaml
# ctwrap specific import
from .parser import _parse, _write, Parser
from .strategy import Strategy, Sequence, Legacy, Matrix
from .wrapper import Simulation
from .output import Output
indent1 = ' * '
indent2 = ' - '
class SimulationHandler(object):
"""
Class handling parameter variations.
Class adds methods to switch between multiple configurations.
.. note:: :class:`SimulationHandler` objects should be instantiated
using factory methods :meth:`from_yaml` or :meth:`from_dict`.
Arguments:
strategy: Batch simulation strategy
defaults: Dictionary containing simulation defaults
output: Dictionary specifying file output
verbosity: Verbosity level
"""
def __init__(self,
strategy: Optional[Strategy]=None,
defaults: Dict[str, Any]=None,
output: Optional[Dict[str, Any]]=None,
verbosity: Optional[int]=0):
"""Constructor for `SimulationHandler` object."""
# parse arguments
self._strategy = strategy
self._defaults = defaults
self.verbosity = verbosity # type: int
if output is not None:
out = Output.from_dict(output)
if out.force:
dest = Path(out.output_name)
if dest.is_file():
dest.unlink()
output = out.settings
self._output = output
self._variations = self._strategy.variations
self._configurations = self._strategy.configurations(self._defaults)
if self.verbosity:
msg = textwrap.wrap(self._strategy.info, 80)
print('\n'.join(msg))
@property
def metadata(self):
return {
'defaults': self._defaults,
'strategy': self._strategy.definition,
'cases': self._variations
}
@classmethod
def from_yaml(cls, yaml_file: str,
strategy: Optional[str]=None,
output_name: Optional[str]=None,
database: Optional[str]=None,
**kwargs: Optional[Dict]):
"""
Alternate constructor using YAML file as input.
The :meth:`from_yaml` method is intended as the main route for the creation of
:class:`SimulationHandler` objects.
Arguments:
yaml_file: YAML file located in current folder, *database* (argument),
or ctwrap's preconfigured YAML database (``yaml`` folder)
strategy: Batch job strategy name (only needed if more than one are defined)
output_name: Output name (overrides YAML configuration)
database: File database (both YAML configuration and output)
**kwargs: Dependent on implementation
"""
if 'path' in kwargs:
            database = kwargs.pop('path')
warnings.warn("Parameter 'path' is superseded by 'database'", DeprecationWarning)
# load configuration from yaml
if database is not None:
full_name = Path(database) / yaml_file
elif not Path(yaml_file).is_file():
# attempt to load standard configuration
full_name = Path(__file__).parents[0] / 'yaml' / yaml_file
else:
full_name = Path(yaml_file)
if not full_name.is_file():
raise IOError("Unable to locate YAML configuration file '{}'"
"".format(yaml_file))
with open(full_name) as stream:
content = yaml.load(stream, Loader=yaml.SafeLoader)
output = content.get('output', {})
# naming priorities: keyword / yaml / automatic
if output_name is None:
output_name = '{}'.format(Path(yaml_file).parents[0] / full_name.stem)
output_name = output.get('name', output_name)
return cls.from_dict(content, strategy=strategy, output_name=output_name, output_path=database, **kwargs)
@classmethod
def from_dict(cls, content: Dict[str, Any],
strategy: Optional[str]=None,
output_name: Optional[str]=None,
output_path: Optional[str]=None,
**kwargs: Optional[Dict]
) -> Union['SimulationHandler', Dict[str, Any], str]:
"""
Alternate constructor using a dictionary as input.
Arguments:
content: Dictionary from YAML input
strategy: Batch job strategy name (only needed if more than one are defined)
output_name: Output name (overrides YAML configuration)
output_path: Output path (overrides YAML configuration)
**kwargs: Dependent on implementation
"""
assert 'ctwrap' in content, 'obsolete yaml file format'
assert 'defaults' in content, 'obsolete yaml file format'
if 'name' in kwargs:
            output_name = kwargs.pop('name')
warnings.warn("Parameter 'name' is superseded by 'output_name'", DeprecationWarning)
if 'path' in kwargs:
            output_path = kwargs.pop('path')
warnings.warn("Parameter 'path' is superseded by 'output_path'", DeprecationWarning)
if 'variation' in content and isinstance(content['variation'], dict):
strategies = Legacy.convert(content['variation'])
strategy = None
warnings.warn("Old implementation", PendingDeprecationWarning)
elif 'strategy' in content and isinstance(content['strategy'], dict):
strategies = content['strategy']
else:
raise ValueError("Missing or invalid argument: need 'strategy' or 'variation' entry in dictionary")
strategy = Strategy.load(strategies, name=strategy)
defaults = content['defaults']
output = content.get('output', None)
if output is not None:
output = Output.from_dict(output, file_name=output_name, file_path=output_path).settings
return cls(strategy=strategy, defaults=defaults, output=output, **kwargs)
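    # Illustrative usage sketch (an assumption, not taken from this file): a YAML
    # configuration such as the bundled 'minimal.yaml' plus its matching simulation
    # module, with the module reference passed to `Simulation.from_module` in
    # whatever form the wrapper expects (module object or name).
    #
    #   sh = SimulationHandler.from_yaml('minimal.yaml', verbosity=1)
    #   sim = Simulation.from_module(minimal_module)
    #   sh.run_serial(sim)      # or sh.run_parallel(sim)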
def __iter__(self):
"""Returns itself as iterator"""
for task in self._variations:
yield task
def __getitem__(self, task: str):
return self._variations(task)
def configuration(self, task: str):
"""
Return configuration.
Arguments:
task: Task to do
Returns:
updated configuration dictionary based on the task
"""
return self._configurations[task]
@property
def verbose(self):
"""verbosity"""
return self.verbosity > 0
@property
def output_name(self):
"""return output name"""
if self._output['path'] is None:
return self._output['name']
else:
return Path(self._output['path']) / self._output['name']
@property
def tasks(self):
"""tasks defined in terms of the variation entry and values"""
return self._variations
def run_task(self, sim: Simulation, task: str, **kwargs: str):
"""
Function to run a specific task.
The :meth:`run_task` method calls the module's run method and
saves the resulting output and metadata. A simple example is:
.. code-block:: Python
# Runs the task `sleep_0.4` using `sim` object
sh.run_task(sim, 'sleep_0.4' )
Arguments:
sim: instance of :class:`Simulation` class
task: task to do
**kwargs: dependent on implementation
"""
# pylint: disable=no-member
assert task in self._variations, 'unknown task `{}`'.format(task)
if kwargs:
warnings.warn("Keyword arguments are deprecated and ignored", DeprecationWarning)
# create a new simulation object
obj = Simulation.from_module(sim._module)
# run simulation
obj.run(self.configuration(task))
if self._output and obj.data:
out = Output.from_dict(self._output)
out.save(obj.data, entry=task, variation=self._variations[task])
out.finalize(self.metadata)
def _setup_batch(self, parallel: bool=False):
"""Create batch queues used by worker function"""
if parallel:
tasks_to_accomplish = mp.Queue()
else:
tasks_to_accomplish = queue.Queue()
for task, config in self._configurations.items():
tasks_to_accomplish.put((task, config))
return tasks_to_accomplish
def run_serial(self,
sim: Simulation,
verbosity: Optional[int]=None,
**kwargs: str) -> bool:
"""
Run variation in series.
The :meth:`run_serial` method runs all the strategy entries in the input
file in serial and also saves metadata. A simple usage example is:
.. code-block:: Python
# Runs all the variations serially
sh.run_serial(sim)
Arguments:
sim: instance of :class:`Simulation` class
verbosity: verbosity
**kwargs: dependent on implementation
Returns:
True when task is completed
"""
assert isinstance(sim, Simulation), 'need simulation object'
if kwargs:
warnings.warn("Keyword arguments are deprecated and ignored", DeprecationWarning)
if verbosity is None:
verbosity = self.verbosity
if verbosity > 0:
print(indent1 + 'Starting serial batch simulation')
# set up queues and dispatch worker
tasks_to_accomplish = self._setup_batch(parallel=False)
lock = None
_worker(sim._module, self._strategy.definition, self._output,
tasks_to_accomplish, lock, verbosity)
if self._output is not None:
out = Output.from_dict(self._output)
out.finalize(self.metadata)
return True
def run_parallel(self,
sim: Simulation,
number_of_processes: Optional[int]=None,
verbosity: Optional[int]=None,
**kwargs: Optional[Any]) -> bool:
"""
Run variation using multiprocessing.
The :meth:`run_parallel` method runs all the strategy entries in the input
file in parallel using
`python multiprocessing <https://docs.python.org/3/library/multiprocessing.html>`_.
and also saves metadata.
.. code-block:: Python
# Runs all the variations in parallel
sh.run_parallel(sim) # run
Arguments:
sim: instance of Simulation class
number_of_processes: number of processes
verbosity: verbosity level
**kwargs: dependent on implementation
Returns:
True when task is completed
"""
assert isinstance(sim, Simulation), 'need simulation object'
if kwargs:
warnings.warn("Keyword arguments are deprecated and ignored", DeprecationWarning)
if number_of_processes is None:
number_of_processes = mp.cpu_count() // 2
if verbosity is None:
verbosity = self.verbosity
if verbosity > 0:
print(indent1 + 'Starting parallel batch simulation using ' +
'{} cores'.format(number_of_processes))
# set up queues and lock
tasks_to_accomplish = self._setup_batch(parallel=True)
lock = mp.Lock()
# creating processes
processes = []
for _ in range(number_of_processes):
p = mp.Process(
target=_worker,
args=(sim._module, self._strategy.definition, self._output,
tasks_to_accomplish, lock, verbosity))
p.start()
processes.append(p)
# completing process
for p in processes:
p.join()
if self._output is not None:
if verbosity > 1:
print(indent1 + "Appending metadata")
out = Output.from_dict(self._output)
out.finalize(self.metadata)
return True
def _worker(
module: str,
strategy: Dict[str, Any],
output: Dict[str, Any],
tasks_to_accomplish: mpq.Queue,
lock: mps.Lock,
verbosity: int
) -> bool:
"""
Worker function running simulation queues.
Arguments:
module: Name of simulation module to be run
tasks_to_accomplish: Queue of remaining tasks
lock: Multiprocessing lock (used for parallel simulations only)
output: Dictionary containing output information
verbosity: Verbosity level
Returns:
True when tasks are completed
"""
# pylint: disable=no-member
parallel = isinstance(lock, mp.synchronize.Lock)
if parallel:
this = mp.current_process().name
else:
this = 'main'
if verbosity > 1 and parallel:
print(indent2 + 'starting ' + this)
# create local copy of strategy object
strategy = Strategy.load(strategy)
variations = strategy.variations # pylint: disable=no-member
tasks = set(variations.keys())
# create local copy of output object
if isinstance(output, dict):
out = Output.from_dict(output)
else:
out = None
other = None
reschedule = 0
while reschedule < len(tasks):
try:
# retrieve next simulation task
task, config = tasks_to_accomplish.get_nowait()
except queue.Empty:
# no tasks left
if verbosity > 1 and parallel:
print(indent2 + 'terminating ' + this)
break
try:
# create object
obj = Simulation.from_module(module)
base = strategy.base(task)
restart = None
if obj.has_restart and out and base:
if parallel:
with lock:
done = set(out.dir())
restart = out.load_like(base, other)
else:
done = set(out.dir())
restart = out.load_like(base, other)
invalid = done - tasks # empty if labels follow task names
if restart is None and not invalid:
# need to pick another task
if verbosity > 1:
print(indent2 + 'rescheduling {} ({})'.format(task, this))
tasks_to_accomplish.put((task, config))
reschedule += 1
continue
if verbosity > 0:
msg = indent1 + 'running `{}` ({})'
print(msg.format(task, this))
# run task
if restart is None:
obj.run(config)
else:
obj.restart(restart, config)
data = obj.data
errored = False
except Exception as err:
# Convert exception to warning
msg = "Simulation of '{}' for '{}' failed with error message:\n{}".format(type(obj).__name__, task, err)
warnings.warn(msg, RuntimeWarning)
data = {task: (type(err).__name__, str(err))}
errored = True
# save output
if out and obj.data:
if parallel:
with lock:
out.save(data, entry=task, variation=variations[task], errored=errored)
else:
out.save(data, entry=task, variation=variations[task], errored=errored)
other = obj.data
return True
```
#### File: ctwrap/modules/equilibrium.py
```python
import warnings
from ctwrap import Parser
# pylint: disable=no-member
try:
import cantera as ct
except ImportError as err:
ct = ImportError('Method requires a working cantera installation.')
def defaults():
"""Returns Parser object containing default configuration"""
return Parser.from_yaml('equilibrium.yaml', defaults=True)
def run(initial, phases, equilibrate):
"""Function handling equilibrium calculations.
The function handles equilibrium calculations for both single
phases (``Solution``; single entry in *phases* argument) and multiple
phases (``Mixture``; multiple entries in *phases* argument).
Arguments:
initial (Parser): Initial condition
phases (Parser): Definition of phases
equilibrate (Parser): Arguments of ``equilibrate`` function
Returns:
Cantera `Solution` or `Mixture` object
"""
T = initial.T.m_as('kelvin')
P = initial.P.m_as('pascal')
# phases that will be included in the calculation, and their initial moles
mix_phases = []
for phase in phases.values():
if phase is None:
continue
obj = ct.Solution(phase.mechanism)
        if all(key in phase for key in ['fuel', 'oxidizer']) and 'phi' in initial:
obj.TP = T, P
obj.set_equivalence_ratio(initial.phi, phase.fuel, phase.oxidizer)
elif 'X' in phase:
obj.TPX = T, P, phase.X
elif 'Y' in phase:
obj.TPY = T, P, phase.Y
mix_phases.append((obj, phase.get('moles')))
# equilibrate the mixture based on configuration
if len(mix_phases) > 1:
obj = ct.Mixture(mix_phases)
obj.T = T
obj.P = P
kwargs = equilibrate.raw
mode = kwargs.pop('mode')
obj.equilibrate(mode, **kwargs)
print('Tad = {:8.2f}'.format(obj.T))
return obj
if __name__ == "__main__":
""" Main function """
config = defaults()
out = run(**config)
```
#### File: ctwrap/modules/freeflame.py
```python
import warnings
from ctwrap import Parser
# pylint: disable=no-member
try:
import cantera as ct
except ImportError as err:
ct = ImportError('Method requires a working cantera installation.')
def defaults():
"""Returns Parser object containing default configuration"""
return Parser.from_yaml('freeflame.yaml', defaults=True)
def restart(base, **kwargs):
"""Restart calculation"""
return run(restart=base, **kwargs)
def run(model=None, upstream=None, domain=None, settings=None, restart=None):
"""Function handling adiabatic flame simulation.
The function uses the class 'ctwrap.Parser' in conjunction with 'pint.Quantity'
for handling and conversion of units.
Arguments:
model (Parser): overloads 'defaults.model'
upstream (Parser): overloads 'defaults.upstream'
domain (Parser): overloads 'defaults.simulation'
settings (Parser): overloads 'defaults.settings'
restart (ct.FlameBase): previous solution
Returns:
Cantera `FlameBase` object
"""
# initialize
# IdealGasMix object used to compute mixture properties, set to the state of the
# upstream fuel-air mixture
gas = ct.Solution(model.mechanism)
# temperature, pressure, and composition
T = upstream.T.m_as('kelvin')
P = upstream.P.m_as('pascal')
gas.TP = T, P
phi = upstream.phi
gas.set_equivalence_ratio(phi, upstream.fuel, upstream.oxidizer)
if restart:
f = restart
f.P = P
f.inlet.T = T
f.inlet.X = gas.X
auto = False
else:
# set up flame object
width = domain.width.m_as('meter')
f = ct.FreeFlame(gas, width=width)
auto = True
if model.transport.lower() != 'mix':
raise ValueError("Initial simulation should use mixture-averaged transport")
f.set_refine_criteria(ratio=settings.ratio, slope=settings.slope, curve=settings.curve)
if model.transport.lower() == 'soret':
f.transport_model = 'Multi'
f.soret_enabled = True
else:
f.transport_model = model.transport.capitalize()
# Solve with mixture-averaged transport model
f.solve(loglevel=settings.loglevel, auto=auto)
# Solve with the energy equation enabled
msg = ' flamespeed = {:7f} m/s ({})'
print(msg.format(f.velocity[0], model.transport))
return f
if __name__ == "__main__":
""" Main function """
config = defaults()
out = run(**config)
```
#### File: ctwrap/tests/test_notebooks.py
```python
import unittest
import subprocess
import warnings
from pathlib import Path
ROOT = Path(__file__).parents[1]
NB_DIR = ROOT / 'docs' / 'examples'
YAML_DIR = ROOT / 'ctwrap' / 'yaml'
warnings.filterwarnings(action='error')
warnings.filterwarnings("ignore", ".*inconsistent pixels*")
def get(nbname, nbpath):
# use nbconvert to execute the notebook
def test_func(self):
print('\n--------------- Testing {0} ---------------'.format(nbname))
print(' {0}'.format(nbpath))
# execute the notebook using nbconvert to generate html
nbexe = subprocess.Popen(['jupyter', 'nbconvert', '--to', 'html',
'{0}'.format(nbpath),
'--execute',
'--ExecutePreprocessor.timeout=180'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, err = nbexe.communicate()
failed = nbexe.returncode
if failed:
print('\n <<<<< {0} FAILED >>>>> \n'.format(nbname))
print('Captured Output: \n\n{0}'.format(err.decode("utf-8")))
else:
print('\n ..... {0} Passed ..... \n'.format(nbname))
# if passed remove the generated html file
subprocess.call(['rm', str(nbpath.with_suffix('.html'))])
self.assertFalse(failed)
return test_func
attrs = dict()
# build test for each notebook
sensor_notebooks = {f.stem: f for f in NB_DIR.glob('*.ipynb')}
for nb in sensor_notebooks:
attrs['test_'+nb] = get(nb, sensor_notebooks[nb])
class TestPurgeHDF(unittest.TestCase):
def setUp(self):
[hdf.unlink() for hdf in Path(YAML_DIR).glob('*.h5')]
def tearDown(self):
[hdf.unlink() for hdf in Path(YAML_DIR).glob('*.h5')]
# create class to unit test notebooks
TestNotebooks = type('TestNotebooks', (TestPurgeHDF,), attrs)
if __name__ == '__main__':
unittest.main()
```
#### File: ctwrap/tests/test_output.py
```python
import pytest
import unittest
from pathlib import Path
import h5py
try:
import ruamel_yaml as yaml
except ImportError:
from ruamel import yaml
import warnings
# add exception as pywintypes imports a deprecated module
warnings.filterwarnings("ignore", ".*the imp module is deprecated*")
# pylint: disable=import-error
# pylint: disable=no-member
import ctwrap.output as cwo
import pkg_resources
# avoid explicit dependence on cantera
try:
pkg_resources.get_distribution('cantera')
except pkg_resources.DistributionNotFound:
ct = ImportError('Method requires a working cantera installation.')
else:
import cantera as ct
PWD = Path(__file__).parents[0]
ROOT = PWD.parents[0]
EXAMPLES = ROOT / 'ctwrap' / 'yaml'
class TestOutput(unittest.TestCase):
_yaml = 'minimal.yaml'
_config = None
_out = None
@classmethod
def setUpClass(cls):
with open(EXAMPLES / cls._yaml) as stream:
cls._config = yaml.load(stream, Loader=yaml.SafeLoader)
out = cls._config.get('output')
if out:
cls._output = cwo.Output.from_dict(
out, file_name=cls._out, file_path=PWD
)
else:
cls._output = None
def tearDown(self):
if not self._output:
return
fname = Path(self._output.output_name)
if fname.is_file():
fname.unlink()
def test_output(self):
if not self._output:
return
self.assertIsInstance(self._output, cwo.Output)
self.assertEqual(self._output.settings['name'], self._out)
def test_dir(self):
if self._out:
self.assertEqual(set(self._output.dir()), {'foo', 'bar'})
class TestCSV(TestOutput):
def test_csv(self):
if not self._output:
return
self.assertIsInstance(self._output, cwo.WriteCSV)
self.assertEqual(self._output.settings['format'], 'csv')
@pytest.mark.skipif(isinstance(ct, ImportError), reason="Cantera not installed")
class TestSolution(TestCSV):
_yaml = 'equilibrium.yaml'
_out = 'solution.csv'
def setUp(self):
self._gas = ct.Solution('h2o2.yaml')
self._output.save(self._gas, 'foo')
self._gas.TP = 500, ct.one_atm
self._output.save(self._gas, 'bar')
def test_add(self):
self._gas.TP = 700, ct.one_atm
self._output.save(self._gas, 'spam', variation={'eggs': 1.})
self.assertEqual(self._output.dir(), ['foo', 'bar', 'spam'])
@pytest.mark.skipif(isinstance(ct, ImportError), reason="Cantera not installed")
class TestMixture(TestCSV):
_yaml = 'equilibrium_multi.yaml'
_out = 'mixture.csv'
def setUp(self):
self._gas = ct.Solution('h2o2.yaml')
self._carbon = ct.Solution('graphite.yaml')
self._mix = ct.Mixture([self._gas, self._carbon])
self._output.save(self._mix, 'foo')
self._mix.T = 500
self._output.save(self._mix, 'bar')
def test_add(self):
self._mix.T = 700
self._output.save(self._mix, 'spam', variation={'eggs': 1.})
self.assertEqual(self._output.dir(), ['foo', 'bar', 'spam'])
class TestHDF(TestOutput):
def test_hdf(self):
if not self._output:
return
self.assertIsInstance(self._output, cwo.WriteHDF)
self.assertEqual(self._output.settings['format'], 'h5')
@pytest.mark.skipif(isinstance(ct, ImportError), reason="Cantera not installed")
class TestSolutionArray(TestHDF):
_yaml = 'ignition.yaml'
_out = 'solutionarray.h5'
def setUp(self):
self._gas = ct.Solution('h2o2.yaml')
self._arr = ct.SolutionArray(self._gas, 2)
self._arr.TP = 300., ct.one_atm
self._output.save(self._arr, 'foo')
self._arr.TP = 500., ct.one_atm
self._output.save(self._arr, 'bar')
def test_load_like(self):
baz = self._output.load_like('foo', self._arr)
self.assertEqual(baz.T[0], 300.)
@pytest.mark.skipif(isinstance(ct, ImportError), reason="Cantera not installed")
class TestFreeFlame(TestOutput):
_yaml = 'freeflame.yaml'
_out = 'freeflame.h5'
def setUp(self):
self._gas = ct.Solution('h2o2.yaml')
self._gas.TP = 300., ct.one_atm
f = ct.FreeFlame(self._gas)
self._output.save(f, 'foo')
self._gas.TP = 500., ct.one_atm
self._freeflame = ct.FreeFlame(self._gas)
self._output.save(self._freeflame, 'bar')
def test_load_like(self):
baz = self._output.load_like('foo', self._freeflame)
self.assertEqual(baz.T[0], 300.)
```
#### File: ctwrap/tests/test_parser.py
```python
import unittest
from pathlib import Path
import pint.quantity as pq
try:
import ruamel_yaml as yaml
except ImportError:
from ruamel import yaml
import warnings
# add exception as pywintypes imports a deprecated module
warnings.filterwarnings("ignore", ".*the imp module is deprecated*")
# pylint: disable=import-error
import ctwrap as cw
from ctwrap.parser import _parse, _write, _update
PWD = Path(__file__).parents[0]
ROOT = PWD.parents[0]
EXAMPLES = ROOT / 'ctwrap' / 'yaml'
class TestUpdate(unittest.TestCase):
_dict = {'foo': {'spam': 2.0, 'eggs': 3.14}, 'bar': 3, 'baz': 'hello world'}
def test_level1(self):
value = 4
out = _update(self._dict, {'bar': value})
self.assertEqual(self._dict['foo'], out['foo'])
self.assertEqual(self._dict['baz'], out['baz'])
self.assertEqual(out['bar'], value)
def test_level2(self):
value = 4
out = _update(self._dict, {'foo': {'eggs': value}})
self.assertEqual(self._dict['bar'], out['bar'])
self.assertEqual(self._dict['foo']['spam'], out['foo']['spam'])
self.assertEqual(self._dict['baz'], out['baz'])
self.assertEqual(out['foo']['eggs'], value)
def test_multi_level(self):
value1 = 4
value2 = 5
out = _update(self._dict, {'foo': {'eggs': value1}, 'bar': value2})
self.assertEqual(self._dict['foo']['spam'], out['foo']['spam'])
self.assertEqual(self._dict['baz'], out['baz'])
self.assertEqual(out['foo']['eggs'], value1)
self.assertEqual(out['bar'], value2)
class TestParse(unittest.TestCase):
def test_string(self):
value, unit = _parse('hello world')
self.assertEqual(value, 'hello world')
self.assertIsNone(unit)
def test_full(self):
value, unit = _parse('1 spam')
self.assertEqual(value, '1')
self.assertEqual(unit, 'spam')
def test_no_unit1(self):
value, unit = _parse('2.')
self.assertEqual(value, '2.')
self.assertIsNone(unit)
def test_no_unit2(self):
value, unit = _parse('2. ')
self.assertEqual(value, '2.')
self.assertEqual(unit, 'dimensionless')
class TestWrite(unittest.TestCase):
def test_string(self):
value = _write('hello world')
self.assertEqual(value, 'hello world')
def test_full(self):
value = _write(1, 'spam')
self.assertEqual(value, '1 spam')
def test_no_unit1(self):
value = _write(2., None)
self.assertEqual(value, '2.0')
def test_no_unit2(self):
value = _write(2., 'dimensionless')
self.assertEqual(value, '2.0')
class TestPassing(unittest.TestCase):
_entry = {'key': 1.}
def test_parser(self):
p = cw.Parser(self._entry)
self.assertEqual(self._entry, p.raw)
for k, v in p.items():
orig = self._entry[k]
if isinstance(v, cw.Parser):
self.assertEqual(orig, v.raw)
self.assertEqual(orig, p.get(k).raw)
else:
self.assertEqual(orig, v)
self.assertEqual(orig, p.get(k))
class TestKeyStr(TestPassing):
_entry = {'spam': 'eggs'}
class TestNone(TestPassing):
_entry = {'spam': None}
class TestParser(TestPassing):
_entry = cw.Parser({'key': 1.})
def test_parser(self):
p = cw.Parser(self._entry)
self.assertEqual(self._entry.raw, p.raw)
class TestMulti(TestParser):
_dict = {'foo': {'spam': 2.0, 'eggs': 3.14}, 'bar': 3, 'baz': '<NAME>'}
_entry = cw.Parser(_dict)
_new = {'foo': {'eggs': 4}, 'bar': 5}
def check(self, p):
self.assertEqual(p.foo.spam, self._entry.foo.spam)
self.assertEqual(p.baz, self._entry.baz)
self.assertEqual(p.foo.eggs, self._new['foo']['eggs'])
self.assertEqual(p.bar, self._new['bar'])
def test_update_dict(self):
p = cw.Parser(self._dict)
p.update(self._new)
self.check(p)
def test_update_parser(self):
p = self._entry
p.update(cw.Parser(self._new))
self.check(p)
class TestFailing(unittest.TestCase):
_entry = None
def test_parser(self):
with self.assertRaises(TypeError):
cw.Parser(self._entry)
class TestInt(TestFailing):
_entry = 1
class TestFloat(TestFailing):
_entry = 3.14
class TestString(TestFailing):
_entry = 'hello world'
class TestYAML(unittest.TestCase):
def test_minimal(self):
with open(EXAMPLES / 'minimal.yaml') as stream:
defaults = yaml.load(stream, Loader=yaml.SafeLoader)
p = cw.Parser.from_yaml('minimal.yaml', path=EXAMPLES)
self.assertEqual(len(p), len(defaults))
self.assertEqual(p.keys(), defaults.keys())
self.assertIn('defaults', p)
dd1 = {**p}
self.assertIsInstance(dd1['defaults'], cw.Parser)
dd2 = {key: val for key, val in p.items()}
self.assertEqual(dd1.keys(), dd2.keys())
self.assertEqual(dd1['defaults'], dd2['defaults'])
def test_ignition(self):
with open(EXAMPLES / 'ignition.yaml') as stream:
yml = yaml.load(stream, Loader=yaml.SafeLoader)
initial = cw.Parser(yml['defaults']['initial'])
self.assertIsInstance(initial.T, pq.Quantity)
self.assertIsInstance(initial['fuel'], str)
def test_freeflame(self):
parser = cw.Parser.from_yaml(
'freeflame.yaml', path=EXAMPLES, keys=['defaults'])
up = parser.defaults.upstream
self.assertIsInstance(up.T, pq.Quantity)
self.assertIsInstance(up.T.m, float)
self.assertEqual(str(up.T.units), 'kelvin')
self.assertEqual(up.T.m - 273.15, up.T.m_as('degC'))
self.assertIsInstance(up.fuel, str)
``` |
{
"source": "johnlouieabrina-acn/sphinx-action-test-1",
"score": 4
} |
#### File: sphinx-action-test-1/source/useful.py
```python
from typing import Dict, Union
def get_person(person: Dict[str, Union[str, int]]) -> Dict[str, Union[str, int]]:
"""This will be the summary of the function.
Args:
person: description of the argument.
first_name (str) - A name given to a person at birth.
last_name (str) - A defined family name.
age (int) - Number of years person has lived.
Returns:
result: description of the return value.
first_name (str) - A name given to a person at birth.
last_name (str) - A defined family name.
age (int) - Number of years person has lived.
"""
return person
person1 = {"first_name": "Juan", "last_name": "Carlos", "age": 16}
print(get_person(person1))
def function_with_pep484_type_annotations(param1: int, param2: str) -> bool:
"""Example function with PEP 484 type annotations.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
The return value. True for success, False otherwise.
"""
def public_fn_with_googley_docstring(name: str, state: bool = None) -> int:
"""TEST This function does something.
Args:
name: The name to use.
Kwargs:
state: Current state to be in.
Returns:
int. The return code::
0 -- Success!
1 -- No good.
2 -- Try again.
Raises:
AttributeError, KeyError
A really great idea. A way you might use me is
>>> print(public_fn_with_googley_docstring(name='foo', state=None))
0
BTW, this always returns 0. **NEVER** use with :class:`MyPublicClass`.
"""
return 0
def public_fn_with_sphinxy_docstring(name, state=None):
"""This function does something.
:param name: The name to use.
:type name: str.
:param state: Current state to be in.
:type state: bool.
:returns: int -- the return code.
:raises: AttributeError, KeyError
"""
return 0
def public_fn_without_docstring():
return True
def _private_fn_with_docstring(foo, bar='baz', foobarbas=None):
"""I have a docstring, but won't be imported if you just use ``:members:``.
"""
return None
class MyPublicClass(object):
"""We use this as a public class example class.
You never call this class before calling :func:`public_fn_with_sphinxy_docstring`.
.. note::
An example of intersphinx is this: you **cannot** use :mod:`pickle` on this class.
"""
def __init__(self, foo, bar='baz'):
"""A really simple class.
Args:
foo (str): We all know what foo does.
Kwargs:
bar (str): Really, same as foo.
"""
self._foo = foo
self._bar = bar
def get_foobar(self, foo, bar=True):
"""This gets the foobar
This really should have a full function definition, but I am too lazy.
>>> print(get_foobar(10, 20))
30
>>> print(get_foobar('a', 'b'))
ab
Isn't that what you want?
"""
return foo + bar
def _get_baz(self, baz=None):
"""A private function to get baz.
This really should have a full function definition, but I am too lazy.
"""
return baz
``` |
{
"source": "johnlpage/gamebike",
"score": 2
} |
#### File: gamebike/gamebike/control.py
```python
from pprint import pprint
import struct
import math
import logging
import time
import hid
import gamebike.controlmapbits as cmb
#A small slide clicker with 4 buttons
#For Brakes etc.
CLICKER_VID=0x1d57
CLICKER_DID=0xad03
class Clicker(object):
def __init__(self):
logging.basicConfig(level=logging.INFO)
try:
self.clicker = hid.device()
logging.info("Opening Clicker")
self.clicker.open(CLICKER_VID, CLICKER_DID)
self.clicker.set_nonblocking(1)
except Exception as e:
logging.error(f"Unable to open Clicker - is it plugged in and do the VID and DID match? {e}\n")
self.clicker = False
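    # Returns 1 (up), 2 (right), 3 (down), 4 (left), or 0 when nothing is
    # pressed or no clicker device is available.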
def get_button(self):
if self.clicker:
clicker_data = bytearray(self.clicker.read(64))
if clicker_data :
byte = clicker_data[cmb.CLICKER_BUTTONS]
if byte in cmb.CLICKER_UP:
return 1
if byte in cmb.CLICKER_DOWN:
return 3
if byte in cmb.CLICKER_RIGHT:
return 2
if byte in cmb.CLICKER_LEFT:
return 4
return 0
if __name__ == "__main__":
print("Testing clicker standalone")
clicker = Clicker()
while True:
v = clicker.get_button()
if v>0:
logging.info(v)
```
#### File: gamebike/gamebike/__init__.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021 <NAME> <<EMAIL>>"
__license__ = "GPLv3-or-later"
__email__ = "<EMAIL>"
__version__ = "2.1"
import time
import logging
from gamebike.handlebar import Handlebar
from gamebike.speedsensor import WheelSpeedSensor
from gamebike.telemetry import Telemetry
from gamebike.resistance import Resistance
from gamebike.wheelcontroller import VirtualWheel
from gamebike.control import Clicker
SPEEDGRACE = 1.5
PEDALRATE = 220
PEDALNEUTRAL = 135
WHEEL_MULTIPLIER=2.21
class GameBike(object):
def __init__(self):
logging.basicConfig(level=logging.INFO)
logging.info("Controller")
self.gamecontroller = VirtualWheel()
self.gamecontroller.setup_wheel()
logging.info("Wheel Speed")
self.speedsensor = WheelSpeedSensor()
rpm = self.speedsensor.getRPM()
logging.info("Telemetry")
self.telemetry = Telemetry()
logging.info("Resistance")
self.resistance = Resistance()
self.resistance.set_resistance(0)
self.prevresistance = -1
self.clicker = Clicker()
self.pedalpressure = 128
self.braking = False
logging.info("Handlebar")
self.handlebar = Handlebar()
dir = self.handlebar.getSteer()
def start_controller(self):
logging.info("Starting ...")
self.handlebar.calibrate()
self.telemetry.read_telemetry()
logging.info("Awaiting Telemetry")
while self.telemetry.receiving == False:
self.telemetry.read_telemetry()
logging.info(f"Game is {self.telemetry.game} - Ready...")
self.lastresistance = round(time.time() * 1000)
gcount=0
gtotal=0
while True:
self.telemetry.read_telemetry()
dir = self.handlebar.getSteer()
rpm = self.speedsensor.getRPM()
self.targetspeed = (rpm/60)*WHEEL_MULTIPLIER
gamespeed = self.telemetry.speedms
#Replace this with something MUCH smarter - this is just ON if we are too slow
if gamespeed > self.targetspeed + SPEEDGRACE :
self.pedalpressure = PEDALNEUTRAL
logging.info("Slower")
elif gamespeed >= 0 and gamespeed < self.targetspeed:
logging.info("Faster")
self.pedalpressure = PEDALRATE
gradient = self.telemetry.gradient
if gradient <0:
gradient = 0
if gradient > 9:
gradient = 9
gcount =gcount+1
gtotal=gtotal+gradient
logging.info(f"GS:{gamespeed} TS:{self.targetspeed} D:{dir} GD:{gradient}")
#Do not set the resistance as frequently
now = round(time.time() * 1000)
if now - self.lastresistance > 200:
resistance = int(gtotal/gcount)
if self.prevresistance != resistance:
logging.info(f"Setting resistance to {resistance}")
self.resistance.set_resistance(int(gtotal/gcount))
self.lastresistance = now
self.prevresistance = resistance
gcount=0
gtotal=0
control = self.clicker.get_button()
if control == 1:
exit()
if control == 2:
self.braking = True
if control == 3:
self.braking = False
if self.braking:
self.gamecontroller.accelerator(100)
self.gamecontroller.brake()
logging.info("Brake!!")
else :
self.gamecontroller.accelerator(int(self.pedalpressure))
#Right button brake
self.gamecontroller.steering(dir)
self.gamecontroller.send()
time.sleep(0.05)
```
#### File: gamebike/gamebike/telemetry.py
```python
import logging
import time
import struct
from struct import unpack
import socket
TELEMETRY_IP = "0.0.0.0"
TELEMETRY_PORT = 5005
class Telemetry(object):
def __init__(self):
logging.basicConfig(level=logging.INFO)
self.speedms=0.0
self.gradient=0.0
self.receiving=False
self.game = "unknown"
try:
self.telemetry_socket = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM # Internet
) # UDP
self.telemetry_socket.setblocking(0)
self.telemetry_socket.bind((TELEMETRY_IP, TELEMETRY_PORT))
logging.info("Opened receiving telemetry socket")
except Exception as e:
logging.error("Cannot open receiving telemetry socket")
logging.error(e)
self.telemetry_socket = None
def parse_horizon_telemetry(self,data):
self.receiving = True
self.game = "horizon4"
# Just unpack what we want
self.gradient = float(struct.unpack("<f", data[60: 64])[0])
#The value arrives as negative radians, so flip the sign and convert to degrees (~57.3 deg/rad)
self.gradient = self.gradient * -57
self.speedms = float(struct.unpack("<f", data[256: 260])[0])
def parse_thecrew_telemetry(self,data):
self.receiving = True
self.game = "thecrew"
logging.debug(f"Received TheCrew telemetry message: {data}")
fmt = "IffffffffffffIIII"
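        # 17 fields: 1 x uint32 (time) + 12 x float32 + 4 x uint32 = 68 bytes,
        # matching the packet-length check in read_telemetry().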
telemetry = unpack(fmt, data)
telemetry = list(map(lambda x: round(x, 2), telemetry))
telemetry = {
"time": telemetry[0],
"angularVelocity": telemetry[1:4],
"orientation": telemetry[4:7],
"acceleration": telemetry[7:10],
"velocity": telemetry[10:13],
"position": telemetry[13:16],
"gameid": telemetry[16],
}
self.gradient = telemetry["orientation"][1] * 57
self.speedms = telemetry["velocity"][1]
def read_telemetry(self):
if self.telemetry_socket is None:
return False
try:
#Eat data until we run out
while True:
data, addr = self.telemetry_socket.recvfrom(324)
if len(data) == 324:
self.parse_horizon_telemetry(data)
if len(data) == 68:
self.parse_thecrew_telemetry(data)
except BlockingIOError:
logging.debug(f"speed(m/s): {self.speedms:.2} gradient:{self.gradient:.2}")
except Exception as e:
logging.error(f"Odd: {str(e)}")
if __name__ == "__main__":
print("Testing class Telemetry standalone")
game_telemetry = Telemetry()
while True:
game_telemetry.read_telemetry()
```
#### File: gamebike/gamebike/wheelcontroller.py
```python
import gamebike.controlmapbits as cmb
from pprint import pprint
import logging
import time
VWHEELDEVICE = "/dev/hidg0"
STEERING_SENSITIVITY = 9
class VirtualWheel(object):
def __init__(self):
logging.basicConfig(level=logging.INFO)
self.emulated_controller_fd = None
self.wheel_data = bytearray(cmb.WHEEL_NEUTRAL)
def setup_wheel(self):
try:
self.emulated_controller_fd = open(VWHEELDEVICE, "rb+", buffering=0)
logging.info("Found suitable HID device")
return True
except Exception as e:
logging.error(
"""Unable to open virtual Joystick - have you created it
and is this running with permissions to write to it?"""
+ str(e)
)
return False
# Lets us write multiple changes at one time
def send(self):
logging.debug(self.wheel_data.hex())
self.emulated_controller_fd.write(self.wheel_data)
# Only emulating inputs as Required - Initially probably
# Steering, Accelerator , gears (fwd/reverse) and maybe handbrake
def steering(self, angle):
tmp = 128 - (angle*STEERING_SENSITIVITY)
#Limits
if tmp > 250:
tmp=250
if tmp < 10:
tmp=10
wheel_value = int(tmp * (cmb.STEER_MAX / 256))
logging.info(wheel_value)
self.wheel_data[cmb.WHEEL_WHEEL_HIGHBYTE] = int(wheel_value / 256)
self.wheel_data[cmb.WHEEL_WHEEL_LOWBYTE] = wheel_value % 256
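    # Note (not in the original code): with STEERING_SENSITIVITY = 9, input
    # angles beyond roughly +/-13 saturate at the 10/250 clamp above.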
#All calibration of how hard to press is handled a level up from here
def accelerator(self, pressure):
self.wheel_data[cmb.WHEEL_ACCELERATEBYTE] = 255 - (pressure % 256)
logging.debug(pressure%256)
def reset(self):
self.wheel_data = bytearray(cmb.WHEEL_NEUTRAL)
self.send()
def gear(self, input):
# Using -1 as reverse otherwise 1
# Assuming a manual sequential gearbox
c = cmb.WHEEL_GEARDOWN
t = cmb.WHEEL_GEARUP
buttons = [c, c, c, c, t, t]
# Change down
for button in buttons:
self.wheel_data[button[0]] |= button[1]
self.send()
time.sleep(0.2)
self.wheel_data[button[0]] &= ~button[1]
self.send()
time.sleep(0.2)
# Up twice
if input > 0:
buttons = [t, t]
for button in buttons:
self.wheel_data[button[0]] |= button[1]
self.send()
time.sleep(0.2)
self.wheel_data[button[0]] &= ~button[1]
self.send()
time.sleep(0.2)
def brake(self):
button = cmb.WHEEL_SQUARE
self.wheel_data[button[0]] |= button[1]
#A Momentary push of n milliseconds
def _push_button(self,button,millis):
self.reset()
self.wheel_data[button[0]] |= button[1]
for h in range(0,int(millis/100)):
self.send()
time.sleep(0.1)
self.reset()
self.send()
#Dpad encoding is different as only one direction at a time can be active
def _push_dpad(self,button,millis):
self.reset()
self.wheel_data[button[0]] &= ~cmb.WHEEL_DPAD_MASK
self.wheel_data[button[0]] |= button[1]
for h in range(0,int(millis/100)):
self.send()
time.sleep(0.1)
self.reset()
self.send()
def configure_steam(self):
print("This is an interactive setup for Steam as a Controller")
print("In Steam, open the controller configuration page that asks you to press each control in turn")
input("Press Enter when ready")
for b in cmb.STEAM_BUTTON_MAPPINGS:
self._push_button(b,200)
time.sleep(0.2)
for b in cmb.STEAM_DPAD_MAPPINGS:
self._push_dpad(b,200)
time.sleep(0.2)
for x in range(0,30):
self.steering(x)
self.send()
time.sleep(0.02)
#Timing matters in this part
self.reset()
self.send()
time.sleep(0.2)
for x in range(128,255,4):
self.accelerator(x)
self.send()
time.sleep(0.02)
self.reset()
self.send()
time.sleep(0.2)
for b in cmb.STEAM_BUTTONS2_MAPPINGS:
self._push_button(b,200)
time.sleep(0.2)
self.reset()
self.send()
time.sleep(0.5)
self._push_button(cmb.WHEEL_TRIANGLE,200)
if __name__ == "__main__":
print("Testing class VirtualWheel standalone")
vwheel = VirtualWheel()
vwheel.setup_wheel()
#vwheel.configure_steam()
vwheel.accelerator(160)
while True:
for x in range(-30,30):
vwheel.steering(x)
vwheel.send()
time.sleep(0.02)
for x in range(30,-30,-1):
vwheel.steering(x)
vwheel.send()
time.sleep(0.02)
``` |
{
"source": "johnlpage/HomeEnergy",
"score": 3
} |
#### File: johnlpage/HomeEnergy/gettemps.py
```python
from bluepy.btle import Scanner, DefaultDelegate, Peripheral
from pprint import pprint
import time
import pymongo
import datetime
TEMPERATURE_SERVICE_UUID = "e95d6100-251d-470a-a062-fa1922dfa9a8"
TEMPERATURE_CHARACTERISTIC_UUID = "e95d9250-251d-470a-a062-fa1922dfa9a8"
class ScanDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
def handleDiscovery(self, dev, isNewDev, isNewData):
pass
with open('/home/pi/.mongo_uri.txt') as f:
uri = f.readline().strip()
mongoclient = pymongo.MongoClient(uri)
map = {
    "c7:03:41:4f:6b:61": "office",
    "d4:7d:86:27:03:d2": "upstairshall",
    "d4:26:d7:2d:dc:a7": "utility",
    "d1:a0:81:06:f5:23": "GamesRoom",
    "dd:ec:c8:b2:0b:64": "Hallway",
    "d0:27:86:a2:95:e1": "LivingRoom",
}
def send_temp_to_atlas(dev,temp):
mongoclient.energy.roomtemps.insert_one({"date": datetime.datetime.now(), "location": map.get(dev,"unknown"), "temp": temp})
#BT comms can fail randomly
def read_temp(dev):
for a in range(5):
try:
microbit = Peripheral(dev)
print("Connected")
print("Getting Service Handle")
tempService = microbit.getServiceByUUID(TEMPERATURE_SERVICE_UUID)
print("Getting Characteristic Handle")
characteristics = tempService.getCharacteristics(forUUID=TEMPERATURE_CHARACTERISTIC_UUID)
print("Getting value")
temp = int.from_bytes(characteristics[0].read(),"big")
print(F"Device: {dev.addr} Temp: {temp}")
send_temp_to_atlas(dev.addr,temp)
return
except Exception as e:
print(e)
#Scan for any and all Bluetooth devices for 10 seconds.
scanner = Scanner().withDelegate(ScanDelegate())
devices = scanner.scan(10.0)
time.sleep(2)
for dev in devices:
#print("Device %s (%s), RSSI=%d dB" % (dev.addr, dev.addrType, dev.rssi))
for (adtype, desc, value) in dev.getScanData():
if value.startswith("BBC micro:bit"):
print(f"Microbit found {value}")
read_temp(dev)
```
#### File: johnlpage/HomeEnergy/smartthings.py
```python
import aiohttp
import pysmartthings
import pymongo
import asyncio
from pprint import pprint
with open("/home/pi/.smartthing_token") as f:
token = f.readline().strip()
interesting_attributes = ['n','powerConsumption','switch',]
async def log_devices():
async with aiohttp.ClientSession() as session:
api = pysmartthings.SmartThings(session, token)
devices = await api.devices()
#print(len(devices))
deviceinfo = []
for device in devices:
try:
name = device.label
#Don't record my mobile phone
if "Tully" not in name:
await device.status.refresh()
turned_on = device.status.switch
record = { "on" : turned_on, "name":name}
for a in interesting_attributes:
v = device.status.values.get(a,None)
if v is not None:
record[a]=v
#Let's not waste space on things that are off
if turned_on:
deviceinfo.append(record)
except Exception as e:
#Fridge returns an auth error?
print(e)
return deviceinfo
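# Note (not in the original code): each record in deviceinfo contains 'on' and
# 'name' plus whichever interesting_attributes are present, e.g.
# {'on': True, 'name': 'Kettle', 'switch': 'on'} (the device name is illustrative).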
def main():
with open("/home/pi/.mongo_uri.txt") as f:
uri = f.readline().strip()
mongoclient = pymongo.MongoClient(uri)
loop = asyncio.get_event_loop()
devices = loop.run_until_complete(log_devices())
loop.close()
pprint(devices)
if len(devices)>0:
#Append this to an array in the latest electricity reading
collection = mongoclient.energy.meter
query = {"type":"electric"}
sort =[('date', -1)]
update = { "$push" : { "devices" : { "$each" : devices} }}
collection.find_one_and_update(query,update,sort=sort)
if __name__ == "__main__":
main()
``` |
{
"source": "johnlr/GSM-7bit",
"score": 3
} |
#### File: johnlr/GSM-7bit/gsm7bit_python2.py
```python
gsm = (u"@£$¥èéùìòÇ\nØø\rÅåΔ_ΦΓΛΩΠΨΣΘΞ\x1bÆæßÉ !\"#¤%&'()*+,-./0123456789:;<=>"
u"?¡ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑܧ¿abcdefghijklmnopqrstuvwxyzäöñüà")
ext = (u"````````````````````^```````````````````{}`````\\````````````[~]`"
u"|````````````````````````````````````€``````````````````````````")
def gsm_encode(plaintext):
res = ""
for c in plaintext:
idx = gsm.find(c)
if idx != -1:
res += chr(idx)
continue
idx = ext.find(c)
if idx != -1:
res += chr(27) + chr(idx)
return res.encode('hex')
print gsm_encode(u"Hello World")
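# Hedged companion sketch (not part of the original file): a possible inverse
# of gsm_encode, assuming the same gsm/ext tables above and the Python 2
# semantics (str.decode('hex'), print statement) used by the rest of this file.
def gsm_decode(hextext):
    res = u""
    octets = hextext.decode('hex')
    i = 0
    while i < len(octets):
        c = ord(octets[i])
        if c == 27 and i + 1 < len(octets):
            # escape sequence: the next byte indexes the extension table
            res += ext[ord(octets[i + 1])]
            i += 2
        else:
            res += gsm[c]
            i += 1
    return res
print gsm_decode(gsm_encode(u"Hello World"))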
``` |
{
"source": "johnlspouge/R0_Unstratified_Case_Data",
"score": 3
} |
#### File: 1_Prem_Matrices_to_df/Executable/prem_matrices_to_df.py
```python
import argparse
from sys import exit
from os.path import exists, isfile
from os import mkdir
from json import load
import pandas as pd
# Prem matrices
# https://doi.org/10.1371/journal.pcbi.1005697.s002
ifn1 = "MUestimates_all_locations_1.json"
ifn2 = "MUestimates_all_locations_2.json"
# UN ISO 3166-1 alpha-3 Country Codes
# https://unstats.un.org/unsd/methodology/m49/overview/UNSD — Methodology.csv
code_fn = "UNSDMethodology.csv"
# The code depends on a dictionary maintaining order according to insertion.
def main():
parser = getArguments()
argument = parser.parse_args()
check( argument )
# Loads UN ISO 3166-1 alpha-3 Country Codes from columns in *.csv file.
# The UN made errors in columns, so the read needs to specify relevant columns (which are uncorrupted).
fields = ['Country or Area', 'ISO-alpha3 Code']
df = pd.read_csv( f'{argument.idir}{code_fn}', skipinitialspace=True, usecols=fields )
country2code = df.set_index('Country or Area').to_dict()['ISO-alpha3 Code']
# Loads input json file with Prem_2017 matrices with headings.
with open( f'{argument.idir}{ifn1}' ) as iFH:
country2matrix = load( iFH ) # This file contains the strata as headings.
init=True
for country,matrix in country2matrix.items():
if init:
stratumS = matrix[0].keys()
stratumL = list(stratumS)
init=False # Records the stratumS only on the first country.
assert len(stratumS) == len(matrix) # The matrix must be square.
stratum2rate = dict()
for stratum in stratumS:
stratum2rate[ stratum ] = []
for row in matrix:
assert row.keys() == stratumS # The keys for each column must match.
for stratum,rate in row.items():
stratum2rate[ stratum ].append(rate)
df = pd.DataFrame(stratum2rate, index=stratumS)
code = country2code.get(country)
if code is None:
print(country)
code = country
df.to_csv(f'{argument.odir}{code}.csv')
# Loads input json file with Prem_2017 matrices without headings.
with open( f'{argument.idir}{ifn2}' ) as iFH:
country2matrix = load( iFH ) # This file contains the strata as headings.
for country,matrix in country2matrix.items():
assert len(stratumS) - 1 == len(matrix) # The matrix must be square and without headings.
stratum2rate = dict()
for stratum in stratumS:
stratum2rate[ stratum ] = []
init=True
for row in matrix:
rateS = row.keys()
assert len(stratumS) == len( rateS )
j = 0
for stratum,rate in row.items():
if init:
i = 0
for stratum in row.keys():
stratum2rate[ stratumL[ i ] ].append(stratum)
i += 1
init=False
stratum2rate[ stratumL[ j ] ].append(rate)
j += 1
df = pd.DataFrame(stratum2rate, index=stratumS)
code = country2code.get(country)
if code is None:
print(country)
code = country
df.to_csv(f'{argument.odir}{code}.csv')
# Checks and fixes arguments if possible.
def check( argument ):
if not exists( f'{argument.idir}' ):
print( f'Error: a valid INPUT_DIRECTORY "{argument.idir}" is required.' )
exit()
if not isfile( f'{argument.idir}{ifn1}' ):
print( f'Error: a valid INPUT_JSON_PREM_MATRICES "{argument.idir}{ifn1}" is required.' )
exit()
if not isfile( f'{argument.idir}{ifn2}' ):
print( f'Error: a valid INPUT_JSON_PREM_MATRICES "{argument.idir}{ifn2}" is required.' )
exit()
if not isfile( f'{argument.idir}{code_fn}' ):
print( f'Error: a valid INPUT_CSV_COUNTRY2CODE "{argument.idir}{code_fn}" is required.' )
exit()
if not exists( f'{argument.odir}' ):
mkdir( f'{argument.odir}' )
def getArguments():
parser = argparse.ArgumentParser(description='The program outputs prem matrices by country as csv.\n')
parser.add_argument("-i", "--idir", dest="idir", default="../Data/", # input directory
help="INPUT_DIRECTORY", metavar="INPUT_DIRECTORY")
parser.add_argument("-o", "--odir", dest="odir", default="../Output/", # input directory
help="OUTPUT_DIRECTORY", metavar="OUTPUT_DIRECTORY")
return parser
if __name__ == "__main__":
main()
```
#### File: 3_Prem_Matrices_to_PF_Eigenvalue/Executable/prem_matrices_to_pf_eigenvalue.py
```python
import argparse
from sys import exit
from os.path import exists, isfile
from os import mkdir, listdir, remove
import pandas as pd
import numpy as np
from scipy.linalg import eig
from json import loads, dumps
def main():
parser = getArguments()
argument = parser.parse_args()
check( argument )
# Loads UN ISO 3166-1 alpha-3 Country Codes from columns in *.csv file.
# The UN made errors in columns, so the read needs to specify relevant columns (which are uncorrupted).
fields = ['Country or Area', 'ISO-alpha3 Code']
df = pd.read_csv( f'{argument.code_fn}', skipinitialspace=True, usecols=fields )
code2country = df.set_index('ISO-alpha3 Code').to_dict()['Country or Area']
# Processes all the *.csv files in f'{argument.pdir}'.
values = []
count = 0
for ifn in sorted( listdir( f'{argument.pdir}' ) ):
code = iso_csv_to_code( ifn )
if code is None:
print( "Invalid input *.csv name:", ifn )
continue
country = code2country.get(code)
if country is None:
print( "Invalid 3-letter *.csv name:", ifn )
country = code
count += 1
print( f'-- Reading "{ifn}" started --', flush=True )
dataframe = pd.read_csv( f'{argument.pdir}{ifn}' )
first_column = dataframe.columns[0]
# Delete first
dataframe = dataframe.drop([first_column], axis=1)
# Calculates for the full Prem matrix.
vector = [ code, country ]
matrix0 = dataframe.to_numpy()
assertPremMatrix( matrix0 )
# Iterates through exclude.
matrix = matrix0.copy()
excludes = loads( argument.excludes )
while True:
eigval, eigvec = perron_frobenius_eig( matrix )
vector.append( eigval )
if not excludes:
break
exclude = excludes.pop(0) # row&col numbers to delete
matrix = np.delete( np.delete( matrix0, exclude, 0 ), exclude, 1 )
assert is_square( matrix )
assert is_nonnegative( matrix )
values.append( vector )
print( f'-- Processed {count} Prem matrices --', flush=True )
cols = ['ISO-alpha3 Code', 'country', 'pf_eigenvalue']
excludes = loads( argument.excludes )
for exclude in excludes:
cols.append( 'pf_eigenvalue ' + dumps( exclude ) )
df = pd.DataFrame( values, columns = cols )
#df.set_index('ISO-alpha3 Code')
#df['ISO-alpha3 Code']=df.index
ofn = f'{argument.odir}pf_eigenvalue.csv'
if isfile( ofn ):
remove( ofn )
df.to_csv( ofn, index=False )
# Halts execution if df is not a Prem matrix.
def assertPremMatrix( matrix ):
SIXTEEN = 16
assert matrix.shape == (SIXTEEN, SIXTEEN)
assert is_nonnegative( matrix )
# Returns upper-case 3-letter code for filename [code].csv or None.
def iso_csv_to_code( ifn ):
if not ifn.endswith(".csv"):
return None
code = ifn.split( '.' )[0]
if not len(code) == 3:
return None
if not code.isupper():
return None
return code
# Returns True if matrix is 2D square.
def is_square( np_array ):
if np_array.ndim != 2:
return False
(m, n) = np_array.shape
return m == n
# Returns True if all elements are nonnegative.
def is_nonnegative( np_array ):
for x in np.nditer(np_array):
if x < 0.0:
return False
return True
# Calculates Perron-Frobenius eigenvalue and eigenvector.
def perron_frobenius_eig( np_array ):
if not is_square( np_array ):
raise ValueError('np_array is not a square two-dimensional matrix.')
if not is_nonnegative( np_array ):
raise ValueError('np_array contains negative elements.')
vals, vecs = eig(np_array)
maxcol = list(vals).index(max(vals))
perron_frobenius_eigval = vals[maxcol]
perron_frobenius_eigvec = vecs[:,maxcol]
if any([ coord < 0.0 for coord in perron_frobenius_eigvec ]):
perron_frobenius_eigvec = -perron_frobenius_eigvec
return perron_frobenius_eigval.real, perron_frobenius_eigvec.real
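# Hedged example (not in the original code): for the nonnegative matrix
# [[2, 1], [1, 2]] the Perron-Frobenius eigenvalue is 3, e.g.
#   val, vec = perron_frobenius_eig(np.array([[2.0, 1.0], [1.0, 2.0]]))
#   assert abs(val - 3.0) < 1e-9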
# Checks and fixes arguments if possible.
def check( argument ):
if not exists( f'{argument.odir}' ):
mkdir( f'{argument.odir}' )
if not exists( f'{argument.pdir}' ):
print( f'Error: a valid PREM_MATRICES_DIRECTORY "{argument.pdir}" is required.' )
exit()
if not f'{argument.pdir}'.endswith('/'):
argument.pdir += '/'
def getArguments():
parser = argparse.ArgumentParser(description='The program outputs Perron-Frobenius eigenvalue for prem matrices by country as csv.\n')
parser.add_argument("-o", "--odir", dest="odir", default="../Output/", # input directory
help="OUTPUT_DIRECTORY", metavar="OUTPUT_DIRECTORY")
parser.add_argument("-c", "--code_fn", dest="code_fn", default="../Data/UNSDMethodology.csv", # *.csv code to country
help="CODE_FN", metavar="CODE_FN")
parser.add_argument("-p", "--prem_matrices_directory", dest="pdir", # [3-letter code].csv contains Prem matrix.
help="PREM_MATRICES_DIRECTORY", metavar="PREM_MATRICES_DIRECTORY")
parser.add_argument("-e", "--exclude_from_prem_matrices", dest="excludes", default="[]", # rows&cols to delete from Prem matrix eigenvalue calculation.
help="EXCLUDE_FROM_PREM_MATRICES", metavar="EXCLUDE_FROM_PREM_MATRICES")
return parser
if __name__ == "__main__":
main()
``` |
{
"source": "johnlspouge/Time-Homogeneity_of_Infection",
"score": 3
} |
#### File: 5_llr_test/Executable/jls_animal_model.py
```python
import warnings
import math
import numpy as np
import scipy.optimize as opt
from scipy.stats import chi2
import numdifftools as ndt
from abc import ABC, abstractmethod
from jls_animal_format import is_survivors, rstrip_nan, is_infection
class AnimalModel(ABC):
def __init__(self, survivors ):
survivors = rstrip_nan( survivors )
if not is_survivors( survivors ):
raise Exception('invalid list of successive survivor counts')
self.survivors = survivors # uninfected animals after Challenge i.
self.pts = AnimalModel._to_pts( self.survivors )
super().__init__()
# Returns model name.
def name(self):
return type(self).__name__
# Returns probability of infection corresponding to i = pt[0] = t-1.
@abstractmethod
def p_infection(self, i, x ):
pass
# Returns False if x violates bounds.
@staticmethod
@abstractmethod
def is_in_bounds(x):
pass
# Returns True if the null model is on the boundary of the model parameter space.
# Returns None if the model is the null model.
@staticmethod
def is_null_on_boundary(x):
pass
# Returns mle for constant hazard of infection as a scalar.
@staticmethod
@abstractmethod
def x0( survivors ):
return (survivors[ 0 ] - survivors[ -1 ]) / sum( survivors[ :-1 ] )
# Depends on individual models to calculate p_infection, probability of infection.
# pt = [ t-1, ns[t], ds[t] ], where ns count the challenged animals; and ds, the deaths.
def ln_likelihood(self, x ):
if not is_infection(self.survivors ) and np.allclose( x, self.x0(self.survivors ) ):
return 0.0
if not self.is_in_bounds(x):
return -math.inf
ln_likelihood = 0.0
for pt in self.pts:
ln_likelihood += AnimalModel._add_ln_likelihood( pt, self.p_infection( pt[0], x ) )
return ln_likelihood
# -self.ln_likelihood( x ) for minimization in scipy.opt.
def _minus_ln_likelihood(self, x ):
return -self.ln_likelihood( x )
# Returns the maximum likelihood estimator as an array, even in one dimension.
def mle(self, method='Basinhopping' ):
#print(method)
x0 = self.x0( self.survivors )
if not is_infection( self.survivors ):
return x0
with warnings.catch_warnings():
warnings.filterwarnings( "ignore", category=RuntimeWarning )
_EPS = 1.0e-06
if method == 'Nelder-Mead':
optimum = opt.minimize( self._minus_ln_likelihood, x0, method='Nelder-Mead',
bounds=None, tol=None, callback=None,
options={'xatol': _EPS, 'fatol': _EPS, 'maxiter': None, 'maxfev': None, 'disp': False, 'return_all': False, 'adaptive': True})
elif method == 'Powell':
optimum = opt.minimize( self._minus_ln_likelihood, x0, method='Powell',
bounds=None, tol=None, callback=None,
options={'xtol': _EPS, 'ftol': _EPS, 'maxiter': None, 'maxfev': None, 'disp': False, 'return_all': False})
if len( x0 ) == 1: # Converts Powell optimum to list for consistency.
optimum.x = [optimum.get('x').tolist()]
#print(optimum.x)
elif method == 'Basinhopping':
optimum = opt.basinhopping( self._minus_ln_likelihood, x0 )
else:
raise Exception('unknown optimization method')
return optimum.get('x')
# Returns arrays of NaN if is_in_bounds(x) but on the boundary.
def fisher_information(self, x): # usually the maximum likelihood estimator
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return ndt.Hessian( self._minus_ln_likelihood )(x)
##########################################################################################
# private routines
##########################################################################################
# Returns pts[t] = [ t-1, ns[t], ds[t] ], where ns count the challenged animals; and ds, the deaths.
@staticmethod
def _to_pts(survivors):
ns = survivors[:-1]
ds = [ i - j for i,j in list( zip( survivors, survivors[1:] ) ) if not math.isnan( j ) ]
assert( len(ns) == len(ds) )
return list( zip( range( len(ns) ), ns, ds ) ) # (t-1, ns[t], ds[t])
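    # Example (not in the original code):
    #   _to_pts([4, 2, 1]) == [(0, 4, 2), (1, 2, 1)]
    # i.e. 2 of 4 animals infected at the first challenge, 1 of 2 at the second.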
# Returns the increments to ln_likelihood from Challenge t.
# Depends on individual models to calculate p_infection, probability of infection.
# pt = [ t-1, ns[t], ds[t] ], where ns count the challenged animals; and ds, the deaths.
@staticmethod
def _add_ln_likelihood( pt, p_infection ):
p_infection = min( 1.0, max( 0.0, p_infection ) ) # sentinels
if p_infection == 0.0 and pt[2] == 0:
return 0.0
elif p_infection == 1.0 and pt[2] == pt[1]:
return 0.0
elif p_infection == 0.0 or p_infection == 1.0: # impossibility
return -math.inf
ln_p = math.log( p_infection, math.e ) # ln probability of deaths
ln_q = math.log( 1.0 - p_infection, math.e ) # ln probability of non-deaths
return pt[2] * ln_p + ( pt[1] - pt[2] ) * ln_q
def _test_AnimalModel():
survivors = [ 64, 32, 16, 8, 4, 2, 1 ]
assert( AnimalModel.x0( survivors ) == 0.5 )
survivors = [ 64, 16, 4, 1 ]
assert( AnimalModel.x0( survivors ) == 0.75 )
##########################################################################################
# derived classes
##########################################################################################
_EPS = 0.003 # accuracy for numerical tests
_METHODS = [ 'Nelder-Mead', 'Powell', 'Basinhopping' ]
# <NAME> al. (2005) Preclinical assessment of HIV vaccines and microbicides by repeated low-dose virus challenges. PLoS Med 2: e249.
class ConstantHazard( AnimalModel ): # p # constant probability p of infection on Challenge t
def __init__(self, survivors):
super().__init__(survivors)
# Returns probability of infection corresponding to pt[0] = t-1.
def p_infection(self, i, x):
return x[0]
# Returns one-dimensional list as MLE for reduced model.
def lr_interval(self, confidence ):
DF = 1
p_hat = AnimalModel.x0( self.survivors )
chi = chi2.ppf( confidence, DF )
def diff( x ):
return self.ln_likelihood( [x] ) - self.ln_likelihood( [p_hat] ) + 0.5 * chi
if p_hat == 0.0:
lo = 0.0
else:
lo = opt.brentq( diff, 0.0, p_hat )
if p_hat == 1.0:
hi = 1.0
else:
hi = opt.brentq( diff, p_hat, 1.0 )
return [lo, hi]
# Returns 2.0 * deviation of full from reduced model.
def chisquared_fct(self):
constantHazard = ConstantHazard( self.survivors )
return 2.0 * (self.ln_likelihood( self.mle() ) - constantHazard.ln_likelihood( constantHazard.x0( self.survivors ) ))
# Returns False if x violates bounds.
@staticmethod
def is_in_bounds(x):
return 0.0 < x[0] <= 1.0
# Returns True if x is on the boundary of the model parameter space.
@staticmethod
def is_null_on_boundary(x):
return None # ConstantHazard is the null model.
# Returns one-dimensional list as MLE for reduced model.
@staticmethod
def x0( survivors ):
return [AnimalModel.x0( survivors )]
def _test_ConstantHazard():
#print('constant_hazard')
data = {
( 64, 32, 16, 8, 4, 2, 1 ):{'x':[0.5],'fun':-87.34,
'fisher_information':[[504.]]}, # 504. checked by hand.
( 64, 16, 4, 1 ):{'x':[0.75],'fun':-47.24,
'fisher_information':[[448.]]},
}
for survivors,optimize_result0 in data.items():
#print(survivors)
x_hat0 = optimize_result0.get('x')
fun0 = optimize_result0.get('fun')
information0 = np.asarray(optimize_result0.get('fisher_information'))
model = ConstantHazard( survivors )
assert( model.name() == 'ConstantHazard' )
x = [0.2]
[ p ] = x
for i in range(10):
assert( model.p_infection(i, x) == p )
assert( all( [ model.p_infection( i, x_hat0 ) == x_hat0[0] for i in range(10) ] ) )
for method in _METHODS:
#print(method)
x_hat = model.mle( method )
fun = model.ln_likelihood( x_hat )
information = model.fisher_information( x_hat )
assert( all( math.isclose( i, j, abs_tol=_EPS ) for i,j in zip( x_hat, x_hat0 ) ) )
assert( math.isclose( information, information0, rel_tol=_EPS ) )
assert( math.isclose( fun, fun0, rel_tol=_EPS ) )
class ConstantHazardFullModel( ConstantHazard ):
# Returns 2.0 * deviation of full from reduced model.
def chisquared_fct(self):
constantHazard = ConstantHazard( self.survivors )
return 2.0 * (self.ln_likelihood( self.mle() ) - constantHazard.ln_likelihood( constantHazard.x0( self.survivors ) ))
# Returns p-value corresponding to the chisquared_fct.
def df(self):
return len( self.x0( self.survivors ) ) - len( super().x0( self.survivors ) )
# Returns p-value corresponding to the chisquared_fct.
def llr_pvalue(self):
return chi2.sf(self.chisquared_fct(), self.df() )
# <NAME> (2012) The role of exposure history on HIV acquisition: insights from repeated low-dose challenge studies. PLoS Comput Biol. 8: p. e1002767.
class ArithmeticPriming( ConstantHazardFullModel ): # p_infection = p + (t - 1) * eps on Challenge t
def __init__(self, survivors):
super().__init__(survivors)
# Returns probability of infection corresponding to i = pt[0] = t-1.
def p_infection(self, i, x):
[ p, eps ] = x
return p + i * eps
# Returns False if x violates bounds.
@staticmethod
def is_in_bounds(x):
return 0.0 < x[0] <= 1.0
# Returns True if x is on the boundary of the model parameter space.
@staticmethod
def is_null_on_boundary(x):
return False
# Returns one-dimensional list as MLE for reduced model.
@staticmethod
def x0(survivors):
return [AnimalModel.x0( survivors ), 0.0]
def _test_ArithmeticPriming():
#print('arithmetic_priming')
data = {
( 64, 32, 16, 8, 4, 2, 1 ):{'x':[0.5,0.0],'fun':-87.34,'llr_pvalue':1.0},
( 64, 16, 4, 1 ):{'x':[0.75,0.0],'fun':-47.24,'llr_pvalue':1.0},
}
#print('ArithmeticPriming')
for survivors,optimize_result0 in data.items():
#print(survivors)
x_hat0 = optimize_result0.get('x')
fun0 = optimize_result0.get('fun')
model = ArithmeticPriming( survivors )
assert( math.isclose( model.llr_pvalue(), optimize_result0.get('llr_pvalue'), abs_tol=_EPS ) )
assert( model.name() == 'ArithmeticPriming' )
x = [0.2, 0.1]
[ p, eps ] = x
for i in range(10):
assert( model.p_infection(i, x) == p + i * eps )
for method in _METHODS:
#print(method)
x_hat = model.mle( method )
fun = model.ln_likelihood( x_hat )
assert( all( math.isclose( i, j, abs_tol=_EPS ) for i,j in zip( x_hat, x_hat0 ) ) )
assert( math.isclose( fun, fun0, rel_tol=_EPS ) )
class GeometricPriming( ConstantHazardFullModel ): # p_infection = p * r**(t - 1) on Challenge t
def __init__(self, survivors):
super().__init__( survivors )
# Returns probability of infection corresponding to i = pt[0] = t-1.
def p_infection(self, i, x):
[ p, r ] = x
return p * r ** i
# Returns False if x violates bounds.
@staticmethod
def is_in_bounds(x):
return 0.0 < x[0] <= 1.0 and 0.0 < x[1]
# Returns True if x is on the boundary of the model parameter space.
@staticmethod
def is_null_on_boundary(x):
return False
# Returns one-dimensional list as MLE for reduced model.
@staticmethod
def x0( survivors ):
return [AnimalModel.x0( survivors ), 1.0]
def _test_GeometricPriming():
data = {
( 64, 32, 16, 8, 4, 2, 1 ):{'x':[0.5, 1.0],'fun':-87.34,
'fisher_information':[[ 504.,228.],[228.,282.]],'llr_pvalue':1.0}, # 504. checked by hand.
( 64, 16, 4, 1 ):{'x':[0.75, 1.0],'fun':-47.24,
'fisher_information':[[ 448.,96.],[96.,96.]],'llr_pvalue':1.0},
( 16384, 12288, 10752, 10080, 9765 ):{'x':[0.25, 0.5],'fun':-17758.51,
'fisher_information':[[ 132139.4,33316.08],[33316.08,30196.32]],'llr_pvalue':0.0},
( 16, 12, 10, 10, 10 ):{'x':[0.2746, 0.3388],'fun':-15.18, # Nelder-Mead minimization
'fisher_information':[[ 103.9577,22.89840],[22.89840,30.11120]],'llr_pvalue':0.01586106},
}
#print('GeometricPriming')
for survivors,optimize_result0 in data.items():
#print(survivors)
x_hat0 = optimize_result0.get('x')
fun0 = optimize_result0.get('fun')
information0 = np.asarray(optimize_result0.get('fisher_information'))
model = GeometricPriming( survivors )
assert( math.isclose( model.llr_pvalue(), optimize_result0.get('llr_pvalue'), abs_tol=_EPS ) )
#assert( math.isclose( model.llr_pvalue(), 0.0, abs_tol=_EPS ) )
assert( model.name() == 'GeometricPriming' )
x = [0.2, 0.1]
[ p, r ] = x
for i in range(10):
assert( model.p_infection(i, x) == p * r ** i )
for method in _METHODS:
#print(method)
x_hat = model.mle( method )
fun = model.ln_likelihood(x_hat)
information = model.fisher_information( x_hat )
#print(fisher_information)
assert( all( math.isclose( i, j, abs_tol=_EPS ) for i,j in zip( x_hat, x_hat0 ) ) )
assert( np.allclose(information, information0, rtol=_EPS ) )
assert( math.isclose( fun, fun0, rel_tol=_EPS ) )
# <NAME> (2012) The role of exposure history on HIV acquisition: insights from repeated low-dose challenge studies. PLoS Comput Biol. 8: p. e1002767.
class StepPriming( ConstantHazardFullModel ): # p_infection = p_1, but switches to p_2 strictly after Challenge l_step
def __init__(self, survivors, l_step): # l_step is the time t at which p_2 starts to pertain.
assert( isinstance( l_step, int ) and 0 < l_step )
if len( survivors ) <= l_step:
raise Exception('The change-point occurs after the end of challenges.')
self.l_step = l_step
super().__init__(survivors)
# Returns probability of infection corresponding to i = pt[0] = t-1.
def p_infection(self, i, x):
[ p_1, p_2 ] = x
if i < self.l_step:
return p_1
return p_2
# Returns False if x violates bounds.
@staticmethod
def is_in_bounds(x):
return 0.0 < x[0] <= 1.0 and 0.0 <= x[1] <= 1.0
# Returns True if x is on the boundary of the model parameter space.
@staticmethod
def is_null_on_boundary(x):
return False
# Returns one-dimensional list as MLE for reduced model.
@staticmethod
def x0( survivors ):
return [AnimalModel.x0( survivors ), AnimalModel.x0( survivors )]
def _test_StepPriming():
data = {
( 64, 32, 16, 8, 4, 2, 1 ):{'x':[0.5,0.5],'fun':-87.34,'llr_pvalue':1.0},
( 64, 16, 4, 1 ):{'x':[0.75,0.75],'fun':-47.24,'llr_pvalue':1.0},
}
#print('StepPriming')
for survivors,optimize_result0 in data.items():
#print(survivors)
x_hat0 = optimize_result0.get('x')
fun0 = optimize_result0.get('fun')
for l_step in range(1,3):
model = StepPriming( survivors, l_step )
assert( math.isclose( model.llr_pvalue(), optimize_result0['llr_pvalue'], abs_tol=_EPS ) )
assert( model.name() == 'StepPriming' )
x = [0.2, 0.1]
[ p_1, p_2 ] = x
for i in range(10):
if i < l_step:
assert( model.p_infection(i, x) == p_1 )
else:
assert( model.p_infection(i, x) == p_2 )
for method in _METHODS:
#print(method)
x_hat = model.mle( method )
fun = model.ln_likelihood(x_hat)
assert( all( math.isclose( i, j, abs_tol=_EPS ) for i,j in zip( x_hat, x_hat0 ) ) )
assert( math.isclose( fun, fun0, rel_tol=_EPS ) )
# <NAME> (2012) The role of exposure history on HIV acquisition: insights from repeated low-dose challenge studies. PLoS Comput Biol. 8: p. e1002767.
class BetaFrailty( ConstantHazardFullModel ): # p # constant probability p of infection on Challenge t
def __init__(self, survivors):
super().__init__(survivors)
# Returns probability of infection corresponding to i = pt[0] = t-1.
def p_infection(self, i, x): # x = [ p_mean, p_var ]
[ a, b ] = BetaFrailty._to_beta_params( x )
p_infection = a / (a + b + i)
return p_infection
# Returns False if x violates bounds.
@staticmethod
def is_in_bounds(x):
return 0.0 < x[0] <= 1.0 and 0.0 <= x[1] <= x[0] * (1 - x[0])
# Returns True if x is on the boundary of the model parameter space.
@staticmethod
def is_null_on_boundary(x):
return True
# Returns the first two centered moments for the beta distribution for the "beta_frailty" full model.
@staticmethod
def _to_moments( beta_params ): # [a,b] = beta_params
[ a, b ] = beta_params
p_mean = a / (a + b)
p_var = (a / (a + b)) * (b / (a + b)) / (a + b + 1.0)
return [ p_mean, p_var ]
# Returns [a,b] = beta_params for the beta distribution for the "beta_frailty" full model.
@staticmethod
def _to_beta_params( moments ): # [a,b] = beta_params
[ p_mean, p_var ] = moments
TOL = 1.0e-12
s = p_mean * (1.0 - p_mean) / max( TOL, p_var ) - 1.0
a = p_mean * s
b = (1.0 - p_mean) * s
return [ a, b ]
# Returns one-dimensional list as MLE for reduced model.
@staticmethod
def x0( survivors ):
p_mean0 = AnimalModel.x0( survivors )
p_var0 = p_mean0 * (1.0 - p_mean0) * 0.1
return [p_mean0, p_var0]
def _test_BetaFrailty():
# test reparametrization
[ a0, b0 ] = [ 3.0, 4.0 ]
[ a, b ] = BetaFrailty._to_beta_params( BetaFrailty._to_moments( [ a0, b0 ] ) )
assert ( abs( a / a0 - 1.0 ) < _EPS )
assert ( abs( b / b0 - 1.0 ) < _EPS )
data = { # Nelder-Mead minimization
( 64, 32, 16, 8, 4, 2, 1 ):{'x':[0.5, 0.0],'fun':-87.34,'llr_pvalue':1.0},
( 64, 16, 4, 1 ):{'x':[0.75, 0.0],'fun':-47.24,'llr_pvalue':1.0},
( 16384, 12288, 10752, 10080, 9765 ):{'x':[0.2534, 0.1114],'fun':-17821.39,
'fisher_information':[[ 269904.7,-331621.3],[-331621.3,607597.8]],'llr_pvalue':0.0},
( 16, 12, 10, 10, 10 ):{'x':[0.2593, 0.1303],'fun':-15.71,
'fisher_information':[[ 273.6344,-358.8308],[-358.8308,691.0599]],'llr_pvalue':0.02930025}
}
#print('BetaFrailty')
for survivors,optimize_result0 in data.items():
#print(survivors)
x_hat0 = optimize_result0.get('x')
[ p_mean0, p_var0 ] = x_hat0
fun0 = optimize_result0.get('fun')
model = BetaFrailty( survivors )
assert( math.isclose( model.llr_pvalue(), optimize_result0['llr_pvalue'], abs_tol=_EPS ) )
assert( model.name() == 'BetaFrailty' )
x = [0.2, 0.1]
[ a, b ] = BetaFrailty._to_beta_params( x )
for i in range(10):
assert( model.p_infection(i, x) == a / (a + b + i) )
for method in _METHODS:
#print(method)
x_hat = model.mle( method )
fun = model.ln_likelihood(x_hat)
[ p_mean, p_var ] = x_hat
if p_var0 < _EPS: # boundary value
assert( math.isclose( p_mean, p_mean0, rel_tol=_EPS ) )
assert( math.isclose( p_var, p_var0, abs_tol=_EPS ) )
else:
information = model.fisher_information( x_hat )
information0 = np.asarray(optimize_result0.get('fisher_information'))
assert( all( math.isclose( i, j, rel_tol=_EPS ) for i,j in zip( x_hat, x_hat0 ) ) )
assert( math.isclose( fun, fun0, rel_tol=_EPS ) )
assert( np.allclose(information, information0, rtol=_EPS ) )
# <NAME>, et al. (2009) Power to detect the effects of HIV vaccination in repeated low-dose challenge experiments. J Infect Dis. 200: p. 609-13.
class DeltaFrailty( ConstantHazardFullModel ): # p # constant probability p of infection on Challenge t
def __init__(self, survivors):
super().__init__( survivors )
# Returns probability of infection corresponding to i = pt[0] = t-1.
def p_infection(self, i, x): # x = [ p_mean, p_var ]
[ p, theta ] = x
p_infection = ((1.0 - theta) * p * (1.0 - p)**i) / (theta + (1.0 - theta) * (1.0 - p)**i)
return p_infection
# Returns False if x violates bounds.
@staticmethod
def is_in_bounds(x):
return 0.0 < x[0] <= 1.0 and 0.0 <= x[1] < 1.0
# Returns True if x is on the boundary of the model parameter space.
@staticmethod
def is_null_on_boundary(x):
return True
# Returns one-dimensional list as MLE for reduced model.
@staticmethod
def x0( survivors ):
if not is_infection( survivors ):
return [ 1.0, 1.0 ]
survivor_count = survivors[-1]
survivors0 = [ i - survivor_count for i in survivors ]
p0 = AnimalModel.x0( survivors0 )
theta0 = survivor_count / survivors[0]
return [ p0, theta0 ]
def _test_DeltaFrailty():
data = {
( 64, 32, 16, 8, 4, 2, 1 ):{'x':[0.5,0.0],'fun':-87.34,'llr_pvalue':1.0},
( 64, 16, 4, 1 ):{'x':[0.75,0.0],'fun':-47.24,'llr_pvalue':1.0},
( 16384, 12288, 10752, 10080, 9765 ):{'x':[0.5904, 0.5843],'fun':-17765.62,
'fisher_information':[[28437.1,-7555.1],[-7555.1,64268.9]],'llr_pvalue':0.0},
( 16, 12, 10, 10, 10 ):{'x':[0.7397, 0.6232],'fun':-15.06,
'fisher_information':[[ 35.61855,-1.804427],[-1.804427,67.64198697]],'llr_pvalue':0.01388016}
}
#print('DeltaFrailty')
for survivors,optimize_result0 in data.items():
#print(survivors)
x_hat0 = optimize_result0.get('x')
fun0 = optimize_result0.get('fun')
model = DeltaFrailty( survivors )
assert( math.isclose( model.llr_pvalue(), optimize_result0['llr_pvalue'], abs_tol=_EPS ) )
assert( model.name() == 'DeltaFrailty' )
x = [0.2, 0.1]
[ p, theta ] = x
for i in range(10):
assert( model.p_infection(i, x) == ((1.0 - theta) * p * (1.0 - p)**i) / (theta + (1.0 - theta) * (1.0 - p)**i) )
for method in _METHODS:
#print(method)
x_hat = model.mle( method )
fun = model.ln_likelihood(x_hat)
information = model.fisher_information( x_hat )
assert( all( math.isclose( i, j, abs_tol=_EPS ) for i,j in zip( x_hat, x_hat0 ) ) )
assert( math.isclose( fun, fun0, rel_tol=_EPS ) )
if x_hat0[1] == 0.0: # The mle of full model is on the boundary.
assert( np.all( np.isnan(information) ) )
else:
information0 = np.asarray( optimize_result0.get( 'fisher_information' ) )
assert( np.allclose( information, information0, rtol=_EPS ) )
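# Hedged usage sketch (not part of the original file): fitting the full
# GeometricPriming model to one of the survivor series used in the tests above
# and reporting the fit; it is illustrative only and is not called by main().
def _example_usage():
    survivors = [16, 12, 10, 10, 10]
    model = GeometricPriming(survivors)
    x_hat = model.mle()
    print('mle (p, r):', x_hat)
    print('log-likelihood:', model.ln_likelihood(x_hat))
    print('llr p-value vs. constant hazard:', model.llr_pvalue())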
def main():
_test_AnimalModel()
_test_ConstantHazard()
_test_ArithmeticPriming()
_test_GeometricPriming()
_test_StepPriming()
_test_BetaFrailty()
_test_DeltaFrailty()
if __name__ == "__main__":
main()
``` |
{
"source": "johnluetke/home-assistant-core",
"score": 3
} |
#### File: homeassistant/util/logging.py
```python
import asyncio
from functools import partial, wraps
import inspect
import logging
import logging.handlers
import queue
import traceback
from typing import Any, Callable, Coroutine
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
from homeassistant.core import HomeAssistant, callback
class HideSensitiveDataFilter(logging.Filter):
"""Filter API password calls."""
def __init__(self, text: str) -> None:
"""Initialize sensitive data filter."""
super().__init__()
self.text = text
def filter(self, record: logging.LogRecord) -> bool:
"""Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, "*******")
return True
class HomeAssistantQueueHandler(logging.handlers.QueueHandler):
"""Process the log in another thread."""
def emit(self, record: logging.LogRecord) -> None:
"""Emit a log record."""
try:
self.enqueue(record)
except asyncio.CancelledError: # pylint: disable=try-except-raise
raise
except Exception: # pylint: disable=broad-except
self.handleError(record)
@callback
def async_activate_log_queue_handler(hass: HomeAssistant) -> None:
"""
Migrate the existing log handlers to use the queue.
This allows us to avoid blocking I/O and formatting messages
in the event loop as log messages are written in another thread.
"""
simple_queue = queue.SimpleQueue() # type: ignore
queue_handler = HomeAssistantQueueHandler(simple_queue)
logging.root.addHandler(queue_handler)
migrated_handlers = []
for handler in logging.root.handlers[:]:
if handler is queue_handler:
continue
logging.root.removeHandler(handler)
migrated_handlers.append(handler)
listener = logging.handlers.QueueListener(
simple_queue, *migrated_handlers, respect_handler_level=False
)
listener.start()
@callback
def _async_stop_queue_handler(_: Any) -> None:
"""Cleanup handler."""
logging.root.removeHandler(queue_handler)
listener.stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, _async_stop_queue_handler)
def log_exception(format_err: Callable[..., Any], *args: Any) -> None:
"""Log an exception with additional context."""
module = inspect.getmodule(inspect.stack()[1][0])
if module is not None:
module_name = module.__name__
else:
# If Python is unable to access the sources files, the call stack frame
# will be missing information, so let's guard.
# https://github.com/home-assistant/home-assistant/issues/24982
module_name = __name__
# Do not print the wrapper in the traceback
frames = len(inspect.trace()) - 1
exc_msg = traceback.format_exc(-frames)
friendly_msg = format_err(*args)
logging.getLogger(module_name).error("%s\n%s", friendly_msg, exc_msg)
def catch_log_exception(
func: Callable[..., Any], format_err: Callable[..., Any], *args: Any
) -> Callable[[], None]:
"""Decorate a callback to catch and log exceptions."""
# Check for partials to properly determine if coroutine function
check_func = func
while isinstance(check_func, partial):
check_func = check_func.func
wrapper_func = None
if asyncio.iscoroutinefunction(check_func):
@wraps(func)
async def async_wrapper(*args: Any) -> None:
"""Catch and log exception."""
try:
await func(*args)
except Exception: # pylint: disable=broad-except
log_exception(format_err, *args)
wrapper_func = async_wrapper
else:
@wraps(func)
def wrapper(*args: Any) -> None:
"""Catch and log exception."""
try:
func(*args)
except Exception: # pylint: disable=broad-except
log_exception(format_err, *args)
wrapper_func = wrapper
return wrapper_func
def catch_log_coro_exception(
target: Coroutine[Any, Any, Any], format_err: Callable[..., Any], *args: Any
) -> Coroutine[Any, Any, Any]:
"""Decorate a coroutine to catch and log exceptions."""
async def coro_wrapper(*args: Any) -> Any:
"""Catch and log exception."""
try:
return await target
except Exception: # pylint: disable=broad-except
log_exception(format_err, *args)
return None
return coro_wrapper()
def async_create_catching_coro(target: Coroutine) -> Coroutine:
"""Wrap a coroutine to catch and log exceptions.
The exception will be logged together with a stacktrace of where the
coroutine was wrapped.
target: target coroutine.
"""
trace = traceback.extract_stack()
wrapped_target = catch_log_coro_exception(
target,
lambda *args: "Exception in {} called from\n {}".format(
target.__name__, # type: ignore
"".join(traceback.format_list(trace[:-1])),
),
)
return wrapped_target
``` |
{
"source": "JohnLulzh2002/BoringHomeworks",
"score": 3
} |
#### File: Others/Python程序设计-实验报告/5.2.2.py
```python
def func(a=128, b=2, *args):
print(a + b)
print("args:", args)
print()
print('positional arguments')
func(1, 2)
print('keyword arguments')
func(b=3, a=4)
print('default values')
func()
print('args')
func(5, 6, 7, 8)
``` |
{
"source": "johnlunney/digitakt-song-mode",
"score": 3
} |
#### File: johnlunney/digitakt-song-mode/controller.py
```python
import json
from diquencer import Sequencer
from view.utils import display_alert
NOT_AVALIABLE = "N/A"
class SongModeController:
def __init__(self):
self._sequencer = Sequencer(error_callback=display_alert)
@property
def position(self):
return self._sequencer.position or NOT_AVALIABLE
@property
def output_ports(self):
return self._sequencer.output_ports
@property
def current_pattern(self):
return self._sequencer.current_pattern or NOT_AVALIABLE
@property
def patterns(self):
if self._sequencer.patterns:
return [pattern.name for pattern in self._sequencer.patterns]
return [NOT_AVALIABLE]
@property
def next_pattern(self):
return self._sequencer.next_pattern or NOT_AVALIABLE
def load(self, sequence_path):
with open(sequence_path) as sequence_file:
self._sequencer.load_sequence(json.load(sequence_file))
def set_output_channel(self, channel):
self._sequencer.set_midi_channel(channel)
def set_output_port(self, output_port):
self._sequencer.set_output_port(output_port)
def set_start_pattern(self, start_pattern_idx):
self._sequencer.set_start_pattern(start_pattern_idx)
def toggle_seq(self):
if self._sequencer.is_playing:
self._sequencer.stop()
else:
self._sequencer.start()
```
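A hedged usage sketch (editorial addition, not part of the repository): driving the SongModeController above from a small script. The sequence filename, port choice and MIDI channel are illustrative assumptions.
```python
# Assumes the diquencer package and the project's view/ package are importable,
# and that "song.json" is a sequence file in the format load() expects.
from controller import SongModeController

def run_sketch(sequence_path="song.json"):
    ctrl = SongModeController()
    ctrl.load(sequence_path)                        # parse the JSON sequence
    if ctrl.output_ports:
        ctrl.set_output_port(ctrl.output_ports[0])  # first available MIDI port
    ctrl.set_output_channel(10)                     # arbitrary example channel
    ctrl.set_start_pattern(0)                       # begin at the first pattern
    ctrl.toggle_seq()                               # start playback; call again to stop
    print(ctrl.position, ctrl.current_pattern, ctrl.next_pattern)

if __name__ == "__main__":
    run_sketch()
```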
#### File: digitakt-song-mode/view/midi.py
```python
import logging
import tkinter as tk
from diquencer.exceptions import MIDIOutputError
from .utils import display_alert
class MIDIFrame(tk.Frame):
def __init__(self, controller, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
# Output selector
self.output_selector = OutputSelector(controller, self)
self.output_selector.grid(row=0, sticky=tk.W + tk.E)
self.output_selector.columnconfigure(0, weight=1)
# Channel selector
self.channel_selector = ChannelSelector(controller, self)
self.channel_selector.grid(row=1, sticky=tk.W + tk.E)
self.channel_selector.columnconfigure(0, weight=1)
class Selector(tk.Frame):
def __init__(self, title, choices, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.choice_var = tk.StringVar(self)
if choices:
self.choice_var.set(choices[0])
self.option_command(self.choice_var.get())
self.title = tk.Label(self, text=title)
self.title.grid(row=0, sticky=tk.W)
self.selector = tk.OptionMenu(
self, self.choice_var, *choices, command=self.option_command
)
self.selector.grid(row=1, sticky=tk.W + tk.E)
def option_command(self, new_value):
raise NotImplementedError
class OutputSelector(Selector):
def __init__(self, controller, parent, *args, **kwargs):
self.controller = controller
super().__init__(
"MIDI output:", controller.output_ports, parent, *args, **kwargs
)
def option_command(self, new_value):
try:
self.controller.set_output_port(new_value)
except MIDIOutputError as error:
logging.debug(error)
display_alert("Cannot open MIDI output port.")
class ChannelSelector(Selector):
def __init__(self, controller, parent, *args, **kwargs):
self.controller = controller
super().__init__("MIDI channel:", list(range(1, 17)), parent, *args, **kwargs)
def option_command(self, new_value):
self.controller.set_output_channel(int(new_value))
```
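The Selector frame above is designed to be subclassed with a concrete option_command. A minimal, hypothetical subclass (the editor's invention, not part of the project) is sketched below.
```python
import tkinter as tk

from view.midi import Selector  # assumes the package layout shown above

class TempoSelector(Selector):
    """Invented example: pick a tempo from a fixed list."""

    def __init__(self, parent, *args, **kwargs):
        super().__init__("Tempo:", ["90", "120", "140"], parent, *args, **kwargs)

    def option_command(self, new_value):
        print(f"Tempo set to {new_value} BPM")  # react to the user's choice

if __name__ == "__main__":
    root = tk.Tk()
    TempoSelector(root).pack()
    root.mainloop()
```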
#### File: digitakt-song-mode/view/transport.py
```python
import logging
import tkinter as tk
from diquencer.exceptions import MIDIOutputError, SequenceNotSet
from .utils import display_alert
class TransportFrame(tk.Frame):
def __init__(self, controller, parent, *args, **kwargs):
tk.Frame.__init__(self, parent, *args, **kwargs)
self.controller = controller
self.position_label = tk.Label(self, text="Position: N/A", font=(None, "16"))
self.position_label.grid(row=0, sticky=tk.W)
self.toggle_seq_btn = tk.Button(
self, text="Start/stop", command=self.toggle_seq
)
self.toggle_seq_btn.grid(row=0, column=1, sticky=tk.E)
self.columnconfigure(0, minsize=256)
def refresh(self):
self.position_label.config(text=f"Position: {self.controller.position}")
self.after(42, self.refresh)
def toggle_seq(self):
try:
self.controller.toggle_seq()
except SequenceNotSet as error:
logging.debug(error)
display_alert("Please open sequence file before starting sequencer.")
except MIDIOutputError as error:
logging.debug(error)
display_alert(error)
``` |
{
"source": "johnlwhiteman/COCOSCATS",
"score": 2
} |
#### File: COCOSCATS/Core/Cfg.py
```python
import copy
import glob
import inspect
import json
import os
import re
import sys
from pprint import pprint
from Core.Error import Error
from Core.File import File
from Core.Framework import Framework
from Core.Msg import Msg
from Core.Text import Text
class Cfg(object):
def __init__(self, cfgPath):
self.cfgPath = cfgPath
self.cfg = None
self.installDir = Framework.getInstallDir()
self.pluginTypes = ["IO", "Analyzer", "Translator", "Demo"]
self.pluginTypeAlias = \
{"Input": "IO", "Analyzer": "Analyzer", "Translator": "Translator", "Output": "IO", "Demo": "Demo"}
def checkIfCfgLoaded(self):
if self.cfg is None:
Error.raiseException("Missing cfg file. Did you load it?")
def disableDemo(self):
self.cfg["Workflow"]["Demo"]["Enable"] = "False"
def enableDemo(self):
self.cfg["Workflow"]["Demo"]["Enable"] = "True"
def getCfg(self):
return copy.deepcopy(self.cfg)
def getPlugin(self, pluginType, pluginName):
for plugin in self.cfg["Plugin"]:
if plugin["Type"] == pluginType and plugin["Name"] == pluginName:
return plugin
Error.raiseException("Plugin {0}:{1} not found.".format(pluginType, pluginName))
def getPlugins(self):
if len(self.cfg["Plugin"]) < 1:
return {"Plugin:":[]}
return {"Plugin": self.cfg["Plugin"]}
def getPluginsByType(self, pluginType):
plugins = []
if pluginType == "Input" or pluginType == "Output":
pluginType = "IO"
for plugin in self.cfg["Plugin"]:
if plugin["Type"] == pluginType:
plugins.append(plugin)
if len(plugins) < 1:
return {"Plugin:":[]}
return {"Plugin": plugins}
def getPluginMethod(self, pluginType, pluginName, pluginMethod):
methods = self.getPlugin(pluginType, pluginName)["Method"]
for method in methods:
if pluginMethod == method["Name"]:
return method
Error.raiseException(
"Can't find {0}::{1}::{2}()".format(
pluginType, pluginName, pluginMethod))
def getPluginMethods(self, pluginType, pluginName):
methods = self.getPlugin(pluginType, pluginName)["Method"]
if methods is None or len(methods) < 1:
return {"Methods:":[]}
return {"Method": methods}
def getProjectDescription(self):
return self.cfg["Description"]
def getProjectID(self):
return self.cfg["ProjectID"]
def getWorkflow(self):
return self.cfg["Workflow"]
def getWorkflowDemoPluginChoices(self):
choices = []
for i in range(0, len(self.cfg["Workflow"]["Demo"]["Plugin"])):
self.cfg["Workflow"]["Demo"]["Plugin"][i]
self.cfg["Workflow"]["Demo"]["Method"][i]
choices.append(
{"Name": self.cfg["Workflow"]["Demo"]["Plugin"][i],
"Method": self.cfg["Workflow"]["Demo"]["Method"][i]})
if len(choices) < 1:
return None
return choices
def getWorkflowDemoPluginCount(self):
return len(self.cfg["Workflow"]["Demo"]["Plugin"])
def getWorkflowInputSource(self):
source = self.getWorkflowPlugin("Input")["Source"]
if Text.isNothing(source):
return None
return source
def getWorkflowOutputTarget(self):
target = self.getWorkflowPlugin("Output")["Target"]
if Text.isNothing(target):
return None
return target
def getWorkflowPlugin(self, pluginType):
plugin = self.cfg["Workflow"][pluginType]
plugin["Type"] = pluginType
plugin["Alias"] = pluginType
if pluginType == "Input" or pluginType == "Output":
plugin["Alias"] = "IO"
plugin["__workflowSourcePath__"] = self.getWorkflowSourcePath()
plugin["__workflowTargetPath__"] = self.getWorkflowTargetPath()
plugin["__projectID__"] = self.getProjectID()
plugin["__projectDescription__"] = self.getProjectDescription()
return plugin
def getWorkflowSourcePath(self):
if Text.isNothing(self.cfg["Workflow"]["Input"]["Source"]):
return None
return self.cfg["Workflow"]["Input"]["Source"]
def getWorkflowTargetPath(self):
if Text.isNothing(self.cfg["Workflow"]["Output"]["Target"]):
return None
return self.cfg["Workflow"]["Output"]["Target"]
def isWorkflowDemoEnabled(self):
return Text.isTrue(self.cfg["Workflow"]["Demo"]["Enable"])
def isWorkflowEditTrue(self, pluginType):
if pluginType == "Demo":
return False
return Text.isTrue(self.cfg["Workflow"][pluginType]["Edit"])
def isWorkflowDebugTrue(self, pluginType):
if pluginType == "Demo":
return False
return Text.isTrue(self.cfg["Workflow"][pluginType]["Debug"])
def load(self, verifyFlag=True):
__cfgPath = self.cfgPath
if not os.path.isfile(__cfgPath):
Error.raiseException("Can't find cfg file: {0}".format(__cfgPath))
with open(__cfgPath) as fd:
self.cfg = json.loads(fd.read())
for name, value in self.cfg.items():
self.__dict__[name] = value
if verifyFlag:
self.verify()
self.cfgPath = __cfgPath
def save(self, path):
with open(path, "w") as fd:
json.dump(self.cfg, fd)
def show(self):
if self.cfg is None or self.cfgPath is None:
Msg.showWarning("No information found for cfg file. Did you load it?")
return
pprint(self.cfg)
Msg.flush()
def verify(self):
for name, value in self.cfg.items():
if name == "ProjectID":
if len(value) > 256 or Text.isNothing(value):
Error.raiseException(
"{0} can only be 256 characters or less: {1}".format(name, value))
if re.search(r'[^A-Za-z0-9_\-\\]', value):
Error.raiseException(
"{0} contains invalid characters: {1}".format(name, value))
if Text.isNothing(value):
Error.raiseException(
"Missing '{0}' value in {1}".format(name, self.cfgPath))
pluginLookupMap = []
for plugin in self.cfg["Plugin"]:
pluginMethods = self.getPluginMethods(plugin["Type"], plugin["Name"])
for pluginMethod in pluginMethods["Method"]:
if not Framework.hasPluginClassMethod(
plugin["Type"], plugin["Name"], pluginMethod["Name"]):
Error.raiseException(
"Can't find {0}::{1}::{2}()".format(
plugin["Type"], plugin["Name"], pluginMethod["Name"]))
pluginLookupMap.append(
"{0}{1}{2}".format(plugin["Type"], plugin["Name"], pluginMethod["Name"]))
if len(self.cfg["Workflow"]["Demo"]["Plugin"]) != len(self.cfg["Workflow"]["Demo"]["Method"]):
Error.raiseException("Mismatched number of demo plugins and methods")
workflowPluginLookupMap = []
for workflowPluginType, workflowPluginCfg in self.cfg["Workflow"].items():
pluginType = self.pluginTypeAlias[workflowPluginType]
if pluginType != "Demo":
workflowPluginLookupMap.append(
"{0}{1}{2}".format(pluginType, workflowPluginCfg["Plugin"],
workflowPluginCfg["Method"]))
else:
for i in range(0, len(workflowPluginCfg["Plugin"])):
key = "{0}{1}{2}".format(pluginType, workflowPluginCfg["Plugin"][i],
workflowPluginCfg["Method"][i])
if key not in pluginLookupMap:
Error.raiseException(
"Can't find workflow plugin {0}::{1}::{2}()".format(
workflowPluginType, workflowPluginCfg["Plugin"][i],
workflowPluginCfg["Method"][i]))
class CfgEditor(object):
def __init__(self):
self.cfg = None
self.cfgPath = None
def deleteCfg(self):
if Text.isNone(self.cfgPath):
return
File.delete(self.cfgPath)
def getCfg(self):
return copy.deepcopy(self.cfg)
def loadCfg(self, cfgPath):
self.cfgPath = cfgPath
with open(cfgPath) as fd:
self.cfg = json.load(fd)
def saveCfg(self, cfgPath=None):
if cfgPath is not None:
self.cfgPath = cfgPath
if cfgPath is None and self.cfgPath is None:
Error.raiseException("You must specify a path to save cfg file")
if self.cfg is None:
Error.raiseException("No cfg loaded or set")
with open(self.cfgPath, "w") as fd:
json.dump(self.cfg, fd)
def setCfg(self, cfg):
self.cfg = cfg
self.cfgPath = None
def setDatabase(self, cfg):
for name, value in cfg.items():
self.cfg["Database"][name] = value
def setDatabaseName(self, name):
self.cfg["Database"]["Name"] = name
def setProjectID(self, projectID):
self.cfg["ProjectID"] = projectID
def setWorkflowInputSource(self, path):
self.cfg["Workflow"]["Input"]["Source"] = path
def setWorkflowOutputTarget(self, path):
self.cfg["Workflow"]["Output"]["Target"] = path
def setWorkflowPlugin(self, pluginType, cfg):
for name, value in cfg.items():
self.cfg["Workflow"][pluginType][name] = value
```
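A hedged usage sketch (editorial addition): loading and querying a configuration with the Cfg class above. The file name "project.json" is an assumption; it must contain the ProjectID, Workflow and Plugin keys the class reads.
```python
from Core.Cfg import Cfg

cfg = Cfg("project.json")
cfg.load(verifyFlag=True)    # parse the JSON and verify referenced plugins exist
print(cfg.getProjectID(), cfg.getProjectDescription())
print(cfg.getWorkflowSourcePath(), "->", cfg.getWorkflowTargetPath())
for plugin in cfg.getPluginsByType("Analyzer")["Plugin"]:
    methods = cfg.getPluginMethods("Analyzer", plugin["Name"])["Method"]
    print(plugin["Name"], [m["Name"] for m in methods])
```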
#### File: COCOSCATS/Core/Framework.py
```python
import glob
import importlib
import json
import os
from Core.Directory import Directory
from Core.Error import Error
from Core.Msg import Msg
class Framework():
@staticmethod
def getDataDir():
return "{0}/Data".format(Framework.getInstallDir())
@staticmethod
def getInstallDir():
return Directory.getCanonicalPath(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
@staticmethod
def getPluginMethod(pluginInstance, methodName):
method = None
try:
method = getattr(pluginInstance, methodName)
except Exception as e:
Error.handleException(
"Unknown class/method {0}/{1}".format(
pluginInstance.__class__,
methodName), True, True)
return method
@staticmethod
def getPluginsDir():
return "{0}/Plugin".format(Framework.getInstallDir())
@staticmethod
def getTestDir():
return "{0}/Test".format(Framework.getInstallDir())
@staticmethod
def getVaultDir():
return "{0}/Vault".format(Framework.getInstallDir())
@staticmethod
def getWebDir():
return "{0}/Web".format(Framework.getInstallDir())
@staticmethod
def getPluginFiles(pluginDirName):
installDir = Framework.getInstallDir()
pluginDir = "{0}/Plugin/{1}".format(installDir, pluginDirName)
if not os.path.isdir(pluginDir):
raise Exception("Unknown plugin directory: {0}".format(pluginDir))
exclude = ["__init__.py", "Interface.py"]
plugins = []
for fileName in os.listdir(pluginDir):
if fileName.endswith(".py") and not fileName in exclude:
name = os.path.splitext(fileName)[0]
plugin = {"Class": fileName,
"Import": "Plugin.{0}.{1}".format(pluginDirName, name),
"Name": name,
"Path": "{0}/{1}".format(pluginDir, fileName)}
plugins.append(plugin)
if len(plugins) < 1:
return None
return plugins
@staticmethod
def hasPluginClass(pluginType, pluginName):
try:
plugin = getattr(importlib.import_module(
"Plugin.{0}.{1}".format(pluginType, pluginName)), pluginName)
except Exception as e:
return False
return True
@staticmethod
def hasPluginClassMethod(pluginType, pluginName, pluginMethod):
plugin = None
try:
plugin = getattr(importlib.import_module(
"Plugin.{0}.{1}".format(pluginType, pluginName)), pluginName)
except Exception as e:
print(e)
return False
return pluginMethod in dir(plugin)
@staticmethod
def showAllPluginFiles():
Msg.show("Plugins")
Framework.showPluginFiles("IO")
Framework.showPluginFiles("Analyzer")
Framework.showPluginFiles("Translator")
@staticmethod
def showPluginFiles(pluginDirName):
print("\n[{0}]".format(pluginDirName))
plugins = Framework.getPluginFiles(pluginDirName)
if plugins is None:
Msg.showWarning("Can't find an plugins")
return
for plugin in plugins:
print(" {0}".format(plugin["Name"]))
```
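A hedged usage sketch (editorial addition): the Framework helpers above are static methods, so plugin discovery can be exercised directly. Run from the COCOSCATS install directory so the Plugin package resolves; the plugin and method names checked below are assumptions based on the files shown later in this record.
```python
from Core.Framework import Framework

print(Framework.getInstallDir())
Framework.showAllPluginFiles()   # prints discovered IO/Analyzer/Translator plugins
if Framework.hasPluginClassMethod("Analyzer", "Nltk", "runSingleWords"):
    print("Nltk.runSingleWords is available")
```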
#### File: Plugin/Analyzer/Nltk.py
```python
from Plugin.Interface import Interface
import nltk
from nltk.collocations import *
from nltk.tokenize import word_tokenize
import string
class Nltk(Interface):
def __init__(self, cfg, pluginParams, workflowPluginParams, frameworkParams):
super(Nltk, self).__init__(cfg, pluginParams, workflowPluginParams, frameworkParams)
def runSingleWords(self):
percentage = float(self.getPluginParamValue("Percentage")) / 100.0
minCharLength = int(self.getPluginParamValue("MinCharLength"))
posFilter = self.getPluginParamValue("POS")
inputContent = self.getInputContent().lower()
punctuation = string.punctuation.replace("-", "")
puncFilter = dict((ord(char), None) for char in punctuation)
tokens = nltk.word_tokenize(inputContent.translate(puncFilter))
tokensCnt = len(tokens)
if tokensCnt < 1:
self.raiseException("No words found")
maxTokensCnt = int(percentage * tokensCnt)
tags = nltk.pos_tag(tokens)
pos = [(token, nltk.map_tag('en-ptb', 'universal', tag)) for token, tag in tags]
filteredTokens1 = []
for p in pos:
if len(p[0]) < minCharLength:
continue
if p[1] not in posFilter:
continue
filteredTokens1.append(p)
freqTokens = nltk.FreqDist(tokens)
content = ""
cnt = 0
for freqToken in freqTokens.most_common(tokensCnt):
for token in filteredTokens1:
if freqToken[0] == token[0]:
content = "{0}\n{1},{2},{3}".format(content, token[0], token[1], freqToken[1])
cnt += 1
break
if cnt >= maxTokensCnt:
break
content = content.strip()
self.setAnalyzerContent(content)
return content
```
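The plugin above depends on the COCOSCATS Interface plumbing, so a standalone, hedged sketch of the same keyword-extraction idea is shown below. It assumes the NLTK 'punkt', 'averaged_perceptron_tagger' and 'universal_tagset' data packages are installed; the sample text and thresholds are invented.
```python
import string

import nltk

text = "The quick brown fox jumps over the lazy dog. The dog sleeps."
punc_filter = dict((ord(c), None) for c in string.punctuation.replace("-", ""))
tokens = nltk.word_tokenize(text.lower().translate(punc_filter))
tags = nltk.pos_tag(tokens)
universal = [(tok, nltk.map_tag("en-ptb", "universal", tag)) for tok, tag in tags]
freq = nltk.FreqDist(tokens)
# keep reasonably long nouns/verbs/adjectives, with their frequency
keepers = {tok: (pos, freq[tok]) for tok, pos in universal
           if len(tok) >= 4 and pos in ("NOUN", "VERB", "ADJ")}
print(keepers)  # e.g. {'quick': ('ADJ', 1), ..., 'dog': ('NOUN', 2)}
```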
#### File: Plugin/Demo/TextEditor.py
```python
from Plugin.Interface import Interface
from Core.File import File
import os
import platform
import sys
import subprocess
class TextEditor(Interface):
def __init__(self, cfg, pluginParams, workflowPluginParams, frameworkParams):
super(TextEditor, self).__init__(cfg, pluginParams, workflowPluginParams, frameworkParams)
def __exe(self, cmd):
print("Execute: {0}".format(cmd))
proc = subprocess.run(cmd, universal_newlines=True, shell=True, check=False)
print("Exit Status: {0}".format(proc.returncode))
return proc.returncode
def __isWindows(self):
return platform.system().upper() == "WINDOWS"
def run(self):
application = self.getPluginParamValue("Application").lower()
source = self.getPluginParamValue("Source").lower()
content = None
path = None
cmd = None
if source == "database":
content = self.getOutputContentDB()
path = File.setContentToTempFile(content["Content"], ".txt")
elif source == "path" or source == "target":
path = self.getWorkflowTarget()
if self.getPluginParamValue("Application").lower() == "default":
if self.__isWindows():
cmd = "start {0}".format(path)
else:
cmd = "open {0}".format(path)
else:
cmd = self.getPluginParamValue("Application")
return self.__exe(cmd)
```
#### File: Plugin/Translator/Azure.py
```python
from Plugin.Interface import Interface
import json
import os
import re
import requests
from xml.etree import ElementTree
class Azure(Interface):
def __init__(self, cfg, pluginParams, workflowPluginParams, frameworkParams):
super(Azure, self).__init__(cfg, pluginParams, workflowPluginParams, frameworkParams)
def __chaffAndLog(self, inputContent, translatedInputContent, analyzerContent, analyzerTokensStr, translatedAnalyzerTokensStr):
acceptedTokens = []
rejectedTokens = []
analyzerTokens = analyzerTokensStr.lower().strip().split(".")
analyzerTokensCnt = len(analyzerTokens)
analyzerContentTokens = analyzerContent.split("\n")
analyzerContentTokensCnt = len(analyzerContentTokens)
translatedAnalyzerTokens = translatedAnalyzerTokensStr.lower().strip().split(".")
translatedAnalyzerTokensCnt = len(translatedAnalyzerTokens)
translatedInputContent = translatedInputContent.lower()
if analyzerTokensCnt != translatedAnalyzerTokensCnt:
self.raiseException("Unexpected mismatched translation counts. Don't know what belongs to what")
for i in range(0, translatedAnalyzerTokensCnt):
l1a = analyzerTokens[i].replace("'","")
l2a = translatedAnalyzerTokens[i].replace("'","")
if re.search(l2a, translatedInputContent, re.IGNORECASE):
acceptedTokens.append(self.__getAnalyzerMatch(l1a, l2a, analyzerContentTokens))
else:
rejectedTokens.append(self.__getAnalyzerMatch(l1a, l2a, analyzerContentTokens))
content = """[VOCABULARY]
{0}
[REJECTED]
{1}
[L1]
{2}
[L2]
{3}
""".format(
"\n".join(acceptedTokens), "\n".join(rejectedTokens),
inputContent, translatedInputContent)
self.setTranslatorContent(content)
return content
def __checkResponse(self, response, msg):
if response is None or response.status_code != 200:
self.raiseException("Azure: {0}\n{1}\nStatus Code: {2}\n{3}".format(
response.headers, response.text, response.status_code, msg))
def __getAccessToken(self):
response = None
credentials = self.getCredentials()
try:
url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken"
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Ocp-Apim-Subscription-Key": credentials["AccessKey"]
}
del(credentials)
response = requests.post(url, headers=headers)
self.__checkResponse(response, "Failed to get access token")
except Exception as e:
self.raiseException(e)
return response.text.strip()
def __getAccessTokenX(self):
return None
def __getAnalyzerMatch(self, l1, l2, analyzerContentTokens):
match = None
for token in analyzerContentTokens:
l1b, pos, freq = token.strip().split(",")
if l1 == l1b:
match = "{0},{1},{2},{3}".format(l1, l2, pos, freq)
break
if match is None:
self.raiseException("Missing analyzer token somewhere")
return match
def __getAnalyzerTokensStr(self, analyzerContent):
tokens = analyzerContent.split("\n")
content = ""
for token in tokens:
content = "{0}'{1}'.".format(content, token.split(",")[0])
return content[:-1].strip().lower()
def __getInputTranslationStr(self, inputContent, accessToken):
return self.__getTranslation(inputContent, accessToken)
def __getSupportedLanguages(self, accessToken):
languages = []
try:
url = "https://api.microsofttranslator.com/v2/http.svc/GetLanguagesForTranslate"
headers = {
"Accept": "application/xml",
"Authorization": "Bearer {0}".format(accessToken)
}
response = requests.get(url, headers=headers)
self.__checkResponse(response, "Failed to get supported language list")
root = ElementTree.fromstring(response.text.encode('utf-8'))
for child in root.getchildren():
languages.append(child.text)
languages.sort()
except Exception as e:
self.raiseException(e)
return languages
def __getTranslation(self, content, accessToken):
url = "https://api.microsofttranslator.com/v2/http.svc/Translate?text={0}&from={1}&to={2}&contentType=text%2Fplain".format(
content, self.getPluginParamValue("L1"), self.getPluginParamValue("L2"))
headers = {
"Accept": "application/xml",
"Authorization": "Bearer {0}".format(accessToken)
}
response = requests.get(url, headers=headers)
self.__checkResponse(response, "Failed to get translation")
root = ElementTree.fromstring(response.text.encode('utf-8'))
translatedContent = root.text.strip()
return translatedContent
def __getTranslatedAnalyzerContent(self, analyzerContent, accessToken):
return self.__getTranslation(analyzerContent, accessToken).strip()
def __getTranslatedAnalyzerTokensStr(self, analyzerTokensStr, accessToken):
raw = self.__getTranslation(analyzerTokensStr, accessToken).strip().lower().split(".")
content = ""
for r in raw:
r = r.replace("'", "").strip()
content = "{0}'{1}'.".format(content, r.replace("'", "").strip())
return content[:-1]
def __getTranslatedAnalyzerTokensStrOneByOne(self, analyzerTokensStr, accessToken):
tokens = analyzerTokensStr.split(".")
content = ""
for token in tokens:
content = "{0}'{1}'.".format(content, self.__getTranslation(token.replace("'", "").strip(), accessToken))
content = content[:-1]
return content
def __getTranslatedInputContent(self, inputContent, accessToken):
return self.__getTranslation(inputContent, accessToken)
def __getTranslatedAnalyzerTokensStrX(self, analyzerTokensStr, accessToken):
content = """'kami'.'itu'.'rumah'.'dan'.'adalah'.'hidup'.'hijau'.'tahun'.'memiliki'.'dapur'.'tapi'.'adalah'.'anjing'.'berlantai dua'.'membeli'.'kamar tidur'.'kamar mandi'.'hidup'.'kamar'.'windows'.'bersih'.'dua-mobil'.'garasi'.'kotor'.'tetangga'.'bagus'.'mereka'.'kulit'.'banyak'.'memiliki'"""
return content
def __getTranslatedAnalyzerTokensStrOneByOneX(self, inputContent, accessToken):
return self.__getTranslatedAnalyzerTokensStrX(inputContent, accessToken)
def __getTranslatedInputContentX(self, inputContent, accessToken):
content = """Aku tinggal di sebuah rumah dua lantai, hijau. Kami membeli dua puluh tahun yang lalu. Ini memiliki tiga kamar tidur, 2 Kamar mandi, dapur dan ruang tamu. Jendela bersih, tetapi dua-mobil garasi kotor. Tetangga bagus, tapi kulit anjing mereka terlalu banyak. Aku harus memotong rumput setiap minggu. Anjing-anjing ingin buang air kecil pada rumput hijau yang menjadikannya kuning. Kami direnovasi dapur bulan lalu. Ini memiliki wastafel, kulkas, oven dan kompor. Hipotek adalah affortable. Pajak properti dan asuransi yang terlalu tinggi walaupun. Anak-anak saya dibesarkan di rumah ini. Mereka meninggalkan rumah untuk perguruan beberapa tahun yang lalu. Sekarang kita hidup oleh diri kita sendiri di rumah. Kami mengunci pintu setiap malam."""
return content
def __removeUnexpectedCharacters(self, content):
chars = ["#"]
for c in chars:
content = content.replace(c, "")
return content
def runTranslate(self):
analyzerContent = self.getAnalyzerContent()
try:
accessToken = self.__getAccessToken()
inputContent = self.__removeUnexpectedCharacters(self.getInputContent())
translatedInputContent = self.__getTranslatedInputContent(inputContent, accessToken)
analyzerContent = self.getAnalyzerContent()
analyzerTokensStr = self.__getAnalyzerTokensStr(analyzerContent)
if self.getPluginParamValue("SearchOneByOne").lower() == "false":
translatedAnalyzerTokensStr = self.__getTranslatedAnalyzerTokensStr(analyzerTokensStr, accessToken)
else:
translatedAnalyzerTokensStr = self.__getTranslatedAnalyzerTokensStrOneByOne(analyzerTokensStr, accessToken)
return self.__chaffAndLog(inputContent, translatedInputContent, analyzerContent, analyzerTokensStr, translatedAnalyzerTokensStr)
except Exception as e:
self.raiseException(e)
``` |
{
"source": "johnlwhiteman/cs6475_computational_photography",
"score": 3
} |
#### File: cs6475_computational_photography/final_project/demo.py
```python
import hashlib
import sys
import cv2
import numpy as np
from StegaBits import *
def show_demo():
visible_image_path = "demo_visible.png"
hidden_image_path = "demo_hidden.png"
output_image_path = "demo_results.png"
seed = None
shared_secret_key = None
alice_text = "Dr. <NAME> will be the next James Bond!"
alice_text_checksum = hashlib.sha256(alice_text).hexdigest()
try:
from cryptography.fernet import Fernet
shared_secret_key = Fernet.generate_key()
except Exception:
pass
print("Alice is working")
alice = StegaBits(visible_image_path, shared_secret_key, seed)
alice.hide(alice_text, hidden_image_path)
alice.save(output_image_path)
alice_image_checksum = alice.get_image_checksum()
cv2.imshow("Alice's Unmodified Image", cv2.imread(visible_image_path))
cv2.waitKey(0)
cv2.imshow("Alice's Unmodified Hidden Image", cv2.imread(hidden_image_path))
cv2.waitKey(0)
print("Alice's secret message: %s" %(alice_text))
cv2.imshow("Alice's Modified Image", alice.image)
cv2.waitKey(0)
print("Bob is working")
bob = StegaBits(output_image_path, shared_secret_key, seed)
bob_text = bob.get_text()
bob_encrypted_text = bob.get_text(dont_decrypt=True)
bob_text_info = bob.get_text_info()
bob_text_checksum = bob.get_text_checksum()
bob_image = bob.get_image()
bob_image_info = bob.get_image_info()
bob_image_checksum = bob.get_image_checksum()
bob.show("Bob's Received Modified Image")
print """
Text:
Alice: %s
Bob: %s
Text Checksum:
Alice: %s
Bob: %s
Image Checksum:
Alice: %s
Bob: %s
Image Details:
Width: %s
Height: %s
Offset: %s
Seed: %s
Text Details:
Length: %s
Offset: %s
Cryptography:
Key: %s
Encrypted: %s
""" %(alice_text, bob_text, alice_text_checksum, bob_text_checksum,
alice_image_checksum, bob_image_checksum,
bob_image_info['width'], bob_image_info['height'],
bob_image_info['offset'], bob_image_info['seed'],
bob_text_info['length'], bob_text_info['offset'],
shared_secret_key, bob_encrypted_text))
bob.show_image("Bob's Extracted Hidden Image")
bob.show_images("Bob's Shows a Montage")
if __name__ == "__main__":
show_demo()
``` |
{
"source": "johnlyzhou/behavenet",
"score": 3
} |
#### File: behavenet/plotting/arhmm_utils.py
```python
import pickle
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.animation as animation
from behavenet import make_dir_if_not_exists
from behavenet.models import AE as AE
from behavenet.plotting import save_movie
# to ignore imports for sphinx-autoapidoc
__all__ = [
'get_discrete_chunks', 'get_state_durations', 'get_latent_arrays_by_dtype',
'get_model_latents_states',
'make_syllable_movies_wrapper', 'make_syllable_movies',
'real_vs_sampled_wrapper', 'make_real_vs_sampled_movies', 'plot_real_vs_sampled',
'plot_states_overlaid_with_latents', 'plot_state_transition_matrix', 'plot_dynamics_matrices',
'plot_obs_biases', 'plot_obs_covariance_matrices']
def get_discrete_chunks(states, include_edges=True):
"""Find occurences of each discrete state.
Parameters
----------
states : :obj:`list`
list of trials; each trial is numpy array containing discrete state for each frame
include_edges : :obj:`bool`
include states at start and end of chunk
Returns
-------
:obj:`list`
list with one entry per discrete state; each entry contains all occurrences of that discrete state by
:obj:`[chunk number, starting index, ending index]`
"""
max_state = max([max(x) for x in states])
indexing_list = [[] for _ in range(max_state + 1)]
for i_chunk, chunk in enumerate(states):
# pad either side so we get start and end chunks
chunk = np.pad(chunk, (1, 1), mode='constant', constant_values=-1)
# don't add 1 because of start padding, this is now index in original unpadded data
split_indices = np.where(np.ediff1d(chunk) != 0)[0]
# last index will be 1 higher than it should be due to padding
# split_indices[-1] -= 1
for i in range(len(split_indices)-1):
# get which state this chunk was (+1 because data is still padded)
which_state = chunk[split_indices[i]+1]
if not include_edges:
if split_indices[i] != 0 and split_indices[i+1] != (len(chunk)-2):
indexing_list[which_state].append(
[i_chunk, split_indices[i], split_indices[i+1]])
else:
indexing_list[which_state].append(
[i_chunk, split_indices[i], split_indices[i+1]])
# convert lists to numpy arrays
indexing_list = [np.asarray(indexing_list[i_state]) for i_state in range(max_state + 1)]
return indexing_list
def get_state_durations(latents, hmm, include_edges=True):
"""Calculate frame count for each state.
Parameters
----------
latents : :obj:`list` of :obj:`np.ndarray`
latent states
hmm : :obj:`ssm.HMM`
arhmm object
include_edges : :obj:`bool`
include states at start and end of chunk
Returns
-------
:obj:`list`
number of frames for each state run; list is empty if single-state model
"""
if hmm.K == 1:
return []
states = [hmm.most_likely_states(x) for x in latents if len(x) > 0]
state_indices = get_discrete_chunks(states, include_edges=include_edges)
durations = []
for i_state in range(0, len(state_indices)):
if len(state_indices[i_state]) > 0:
durations.append(np.concatenate(np.diff(state_indices[i_state][:, 1:3], 1)))
else:
durations.append(np.array([]))
return durations
def get_latent_arrays_by_dtype(data_generator, sess_idxs=0, data_key='ae_latents'):
"""Collect data from data generator and put into dictionary with dtypes for keys.
Parameters
----------
data_generator : :obj:`ConcatSessionsGenerator`
sess_idxs : :obj:`int` or :obj:`list`
concatenate train/test/val data across one or more sessions
data_key : :obj:`str`
key into data generator object; 'ae_latents' | 'labels'
Returns
-------
:obj:`tuple`
- latents (:obj:`dict`): with keys 'train', 'val', 'test'
- trial indices (:obj:`dict`): with keys 'train', 'val', 'test'
"""
if isinstance(sess_idxs, int):
sess_idxs = [sess_idxs]
dtypes = ['train', 'val', 'test']
latents = {key: [] for key in dtypes}
trial_idxs = {key: [] for key in dtypes}
for sess_idx in sess_idxs:
dataset = data_generator.datasets[sess_idx]
for data_type in dtypes:
curr_idxs = dataset.batch_idxs[data_type]
trial_idxs[data_type] += list(curr_idxs)
latents[data_type] += [dataset[i_trial][data_key][0][:] for i_trial in curr_idxs]
return latents, trial_idxs
def get_model_latents_states(
hparams, version, sess_idx=0, return_samples=0, cond_sampling=False, dtype='test',
dtypes=['train', 'val', 'test'], rng_seed=0):
"""Return arhmm defined in :obj:`hparams` with associated latents and states.
Can also return sampled latents and states.
Parameters
----------
hparams : :obj:`dict`
needs to contain enough information to specify an arhmm
version : :obj:`str` or :obj:`int`
test tube model version (can be 'best')
sess_idx : :obj:`int`, optional
session index into data generator
return_samples : :obj:`int`, optional
number of trials to sample from model
cond_sampling : :obj:`bool`, optional
if :obj:`True` return samples conditioned on most likely state sequence; else return
unconditioned samples
dtype : :obj:`str`, optional
trial type to use for conditional sampling; 'train' | 'val' | 'test'
dtypes : :obj:`array-like`, optional
trial types for which to collect latents and states
rng_seed : :obj:`int`, optional
random number generator seed to control sampling
Returns
-------
:obj:`dict`
- 'model' (:obj:`ssm.HMM` object)
- 'latents' (:obj:`dict`): latents from train, val and test trials
- 'states' (:obj:`dict`): states from train, val and test trials
- 'trial_idxs' (:obj:`dict`): trial indices from train, val and test trials
- 'latents_gen' (:obj:`list`)
- 'states_gen' (:obj:`list`)
"""
from behavenet.data.utils import get_transforms_paths
from behavenet.fitting.utils import experiment_exists
from behavenet.fitting.utils import get_best_model_version
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_session_dir
hparams['session_dir'], sess_ids = get_session_dir(
hparams, session_source=hparams.get('all_source', 'save'))
hparams['expt_dir'] = get_expt_dir(hparams)
# get version/model
if version == 'best':
version = get_best_model_version(
hparams['expt_dir'], measure='val_loss', best_def='min')[0]
else:
_, version = experiment_exists(hparams, which_version=True)
if version is None:
raise FileNotFoundError(
'Could not find the specified model version in %s' % hparams['expt_dir'])
# load model
model_file = os.path.join(hparams['expt_dir'], 'version_%i' % version, 'best_val_model.pt')
with open(model_file, 'rb') as f:
hmm = pickle.load(f)
# load latents/labels
if hparams['model_class'].find('labels') > -1:
from behavenet.data.utils import load_labels_like_latents
all_latents = load_labels_like_latents(hparams, sess_ids, sess_idx)
else:
_, latents_file = get_transforms_paths('ae_latents', hparams, sess_ids[sess_idx])
with open(latents_file, 'rb') as f:
all_latents = pickle.load(f)
# collect inferred latents/states
trial_idxs = {}
latents = {}
states = {}
for data_type in dtypes:
trial_idxs[data_type] = all_latents['trials'][data_type]
latents[data_type] = [all_latents['latents'][i_trial] for i_trial in trial_idxs[data_type]]
states[data_type] = [hmm.most_likely_states(x) for x in latents[data_type]]
# collect sampled latents/states
if return_samples > 0:
states_gen = []
np.random.seed(rng_seed)
if cond_sampling:
n_latents = latents[dtype][0].shape[1]
latents_gen = [np.zeros((len(state_seg), n_latents)) for state_seg in states[dtype]]
for i_seg, state_seg in enumerate(states[dtype]):
for i_t in range(len(state_seg)):
if i_t >= 1:
latents_gen[i_seg][i_t] = hmm.observations.sample_x(
states[dtype][i_seg][i_t], latents_gen[i_seg][:i_t], input=np.zeros(0))
else:
latents_gen[i_seg][i_t] = hmm.observations.sample_x(
states[dtype][i_seg][i_t],
latents[dtype][i_seg][0].reshape((1, n_latents)), input=np.zeros(0))
else:
latents_gen = []
offset = 200
for i in range(return_samples):
these_states_gen, these_latents_gen = hmm.sample(
latents[dtype][0].shape[0] + offset)
latents_gen.append(these_latents_gen[offset:])
states_gen.append(these_states_gen[offset:])
else:
latents_gen = []
states_gen = []
return_dict = {
'model': hmm,
'latents': latents,
'states': states,
'trial_idxs': trial_idxs,
'latents_gen': latents_gen,
'states_gen': states_gen,
}
return return_dict
def make_syllable_movies_wrapper(
hparams, save_file, sess_idx=0, dtype='test', max_frames=400, frame_rate=10,
min_threshold=0, n_buffer=5, n_pre_frames=3, n_rows=None, single_syllable=None):
"""Present video clips of each individual syllable in separate panels.
This is a high-level function that loads the arhmm model described in the hparams dictionary
and produces the necessary states/video frames.
Parameters
----------
hparams : :obj:`dict`
needs to contain enough information to specify an arhmm
save_file : :obj:`str`
full save file (path and filename)
sess_idx : :obj:`int`, optional
session index into data generator
dtype : :obj:`str`, optional
types of trials to make video with; 'train' | 'val' | 'test'
max_frames : :obj:`int`, optional
maximum number of frames to animate
frame_rate : :obj:`float`, optional
frame rate of saved movie
min_threshold : :obj:`int`, optional
minimum number of frames in a syllable run to be considered for movie
n_buffer : :obj:`int`
number of blank frames between syllable instances
n_pre_frames : :obj:`int`
number of behavioral frames to precede each syllable instance
n_rows : :obj:`int` or :obj:`NoneType`
number of rows in output movie
single_syllable : :obj:`int` or :obj:`NoneType`
choose only a single state for movie
"""
from behavenet.data.data_generator import ConcatSessionsGenerator
from behavenet.data.utils import get_data_generator_inputs
from behavenet.data.utils import get_transforms_paths
from behavenet.fitting.utils import experiment_exists
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_session_dir
# load images, latents, and states
hparams['session_dir'], sess_ids = get_session_dir(
hparams, session_source=hparams.get('all_source', 'save'))
hparams['expt_dir'] = get_expt_dir(hparams)
hparams['load_videos'] = True
hparams, signals, transforms, paths = get_data_generator_inputs(hparams, sess_ids)
data_generator = ConcatSessionsGenerator(
hparams['data_dir'], sess_ids,
signals_list=[signals[sess_idx]],
transforms_list=[transforms[sess_idx]],
paths_list=[paths[sess_idx]],
device='cpu', as_numpy=True, batch_load=False, rng_seed=hparams['rng_seed_data'])
ims_orig = data_generator.datasets[sess_idx].data['images']
del data_generator # free up memory
# get tt version number
_, version = experiment_exists(hparams, which_version=True)
print('producing syllable videos for arhmm %s' % version)
# load latents/labels
if hparams['model_class'].find('labels') > -1:
from behavenet.data.utils import load_labels_like_latents
latents = load_labels_like_latents(hparams, sess_ids, sess_idx)
else:
_, latents_file = get_transforms_paths('ae_latents', hparams, sess_ids[sess_idx])
with open(latents_file, 'rb') as f:
latents = pickle.load(f)
trial_idxs = latents['trials'][dtype]
# load model
model_file = os.path.join(hparams['expt_dir'], 'version_%i' % version, 'best_val_model.pt')
with open(model_file, 'rb') as f:
hmm = pickle.load(f)
# infer discrete states
states = [hmm.most_likely_states(latents['latents'][s]) for s in latents['trials'][dtype]]
if len(states) == 0:
raise ValueError('No latents for dtype=%s' % dtype)
# find runs of discrete states; state indices is a list, each entry of which is a np array with
# shape (n_state_instances, 3), where the 3 values are:
# chunk_idx, chunk_start_idx, chunk_end_idx
# chunk_idx is in [0, n_chunks], and indexes trial_idxs
state_indices = get_discrete_chunks(states, include_edges=True)
K = len(state_indices)
# get all example over minimum state length threshold
over_threshold_instances = [[] for _ in range(K)]
for i_state in range(K):
if state_indices[i_state].shape[0] > 0:
state_lens = np.diff(state_indices[i_state][:, 1:3], axis=1)
over_idxs = state_lens > min_threshold
over_threshold_instances[i_state] = state_indices[i_state][over_idxs[:, 0]]
np.random.shuffle(over_threshold_instances[i_state]) # shuffle instances
make_syllable_movies(
ims_orig=ims_orig,
state_list=over_threshold_instances,
trial_idxs=trial_idxs,
save_file=save_file,
max_frames=max_frames,
frame_rate=frame_rate,
n_buffer=n_buffer,
n_pre_frames=n_pre_frames,
n_rows=n_rows,
single_syllable=single_syllable)
def make_syllable_movies(
ims_orig, state_list, trial_idxs, save_file=None, max_frames=400, frame_rate=10,
n_buffer=5, n_pre_frames=3, n_rows=None, single_syllable=None):
"""Present video clips of each individual syllable in separate panels
Parameters
----------
ims_orig : :obj:`np.ndarray`
shape (n_frames, n_channels, y_pix, x_pix)
state_list : :obj:`list`
each entry (one per state) contains all occurences of that discrete state by
:obj:`[chunk number, starting index, ending index]`
trial_idxs : :obj:`array-like`
indices into :obj:`states` for which trials should be plotted
save_file : :obj:`str`
full save file (path and filename)
max_frames : :obj:`int`, optional
maximum number of frames to animate
frame_rate : :obj:`float`, optional
frame rate of saved movie
n_buffer : :obj:`int`, optional
number of blank frames between syllable instances
n_pre_frames : :obj:`int`, optional
number of behavioral frames to precede each syllable instance
n_rows : :obj:`int` or :obj:`NoneType`, optional
number of rows in output movie
single_syllable : :obj:`int` or :obj:`NoneType`, optional
choose only a single state for movie
"""
K = len(state_list)
# Initialize syllable movie frames
plt.clf()
if single_syllable is not None:
K = 1
fig_width = 5
n_rows = 1
else:
fig_width = 10 # aiming for dim 1 being 10
# get video dims
bs, n_channels, y_dim, x_dim = ims_orig[0].shape
movie_dim1 = n_channels * y_dim
movie_dim2 = x_dim
if n_rows is None:
n_rows = int(np.floor(np.sqrt(K)))
n_cols = int(np.ceil(K / n_rows))
fig_dim_div = movie_dim2 * n_cols / fig_width
fig_width = (movie_dim2 * n_cols) / fig_dim_div
fig_height = (movie_dim1 * n_rows) / fig_dim_div
fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_width, fig_height))
for i, ax in enumerate(fig.axes):
ax.set_yticks([])
ax.set_xticks([])
if i >= K:
ax.set_axis_off()
elif single_syllable is not None:
ax.set_title('Syllable %i' % single_syllable, fontsize=16)
else:
ax.set_title('Syllable %i' % i, fontsize=16)
fig.tight_layout(pad=0, h_pad=1.005)
imshow_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
ims = [[] for _ in range(max_frames + bs + 200)]
# Loop through syllables
for i_k, ax in enumerate(fig.axes):
# skip if no syllable in this axis
if i_k >= K:
continue
print('processing syllable %i/%i' % (i_k + 1, K))
# skip if no syllables are longer than threshold
if len(state_list[i_k]) == 0:
continue
if single_syllable is not None:
i_k = single_syllable
i_chunk = 0
i_frame = 0
while i_frame < max_frames:
if i_chunk >= len(state_list[i_k]):
# show blank if out of syllable examples
im = ax.imshow(np.zeros((movie_dim1, movie_dim2)), **imshow_kwargs)
ims[i_frame].append(im)
i_frame += 1
else:
# Get movies/latents
chunk_idx = state_list[i_k][i_chunk, 0]
which_trial = trial_idxs[chunk_idx]
tr_beg = state_list[i_k][i_chunk, 1]
tr_end = state_list[i_k][i_chunk, 2]
batch = ims_orig[which_trial]
movie_chunk = batch[max(tr_beg - n_pre_frames, 0):tr_end]
movie_chunk = np.concatenate(
[movie_chunk[:, j] for j in range(movie_chunk.shape[1])], axis=1)
# if np.sum(states[chunk_idx][tr_beg:tr_end-1] != i_k) > 0:
# raise ValueError('Misaligned states for syllable segmentation')
# Loop over this chunk
for i in range(movie_chunk.shape[0]):
im = ax.imshow(movie_chunk[i], **imshow_kwargs)
ims[i_frame].append(im)
# Add red box if start of syllable
syllable_start = n_pre_frames if tr_beg >= n_pre_frames else tr_beg
if syllable_start <= i < (syllable_start + 2):
rect = matplotlib.patches.Rectangle(
(5, 5), 10, 10, linewidth=1, edgecolor='r', facecolor='r')
im = ax.add_patch(rect)
ims[i_frame].append(im)
i_frame += 1
# Add buffer black frames
for j in range(n_buffer):
im = ax.imshow(np.zeros((movie_dim1, movie_dim2)), **imshow_kwargs)
ims[i_frame].append(im)
i_frame += 1
i_chunk += 1
print('creating animation...', end='')
ani = animation.ArtistAnimation(
fig,
[ims[i] for i in range(len(ims)) if ims[i] != []], interval=20, blit=True, repeat=False)
print('done')
if save_file is not None:
# put together file name
if save_file[-3:] == 'mp4':
save_file = save_file[:-3]
if single_syllable is not None:
state_str = str('_syllable-%02i' % single_syllable)
else:
state_str = ''
save_file += state_str
save_file += '.mp4'
save_movie(save_file, ani, frame_rate=frame_rate)
def real_vs_sampled_wrapper(
output_type, hparams, save_file, sess_idx, dtype='test', conditional=True, max_frames=400,
frame_rate=20, n_buffer=5, xtick_locs=None, frame_rate_beh=None, format='png'):
"""Produce movie with (AE) reconstructed video and sampled video.
This is a high-level function that loads the model described in the hparams dictionary and
produces the necessary state sequences/samples. The sampled video can be completely
unconditional (states and latents are sampled) or conditioned on the most likely state
sequence.
Parameters
----------
output_type : :obj:`str`
'plot' | 'movie' | 'both'
hparams : :obj:`dict`
needs to contain enough information to specify an autoencoder
save_file : :obj:`str`
full save file (path and filename)
sess_idx : :obj:`int`, optional
session index into data generator
dtype : :obj:`str`, optional
types of trials to make plot/video with; 'train' | 'val' | 'test'
conditional : :obj:`bool`
conditional vs unconditional samples; for creating reconstruction title
max_frames : :obj:`int`, optional
maximum number of frames to animate
frame_rate : :obj:`float`, optional
frame rate of saved movie
n_buffer : :obj:`int`
number of blank frames between animated trials if more than one is needed to reach
:obj:`max_frames`
xtick_locs : :obj:`array-like`, optional
tick locations in bin values for plot
frame_rate_beh : :obj:`float`, optional
behavioral video framerate; to properly relabel xticks
format : :obj:`str`, optional
any accepted matplotlib save format, e.g. 'png' | 'pdf' | 'jpeg'
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle if :obj:`output_type='plot'` or :obj:`output_type='both'`, else
nothing returned (movie is saved)
"""
from behavenet.data.utils import get_transforms_paths
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_session_dir
# check input - cannot create sampled movies for arhmm-labels models (no mapping from labels to
# frames)
if hparams['model_class'].find('labels') > -1:
if output_type == 'both' or output_type == 'movie':
print('warning: cannot create video with "arhmm-labels" model; producing plots')
output_type = 'plot'
# load latents and states (observed and sampled)
model_output = get_model_latents_states(
hparams, '', sess_idx=sess_idx, return_samples=50, cond_sampling=conditional)
if output_type == 'both' or output_type == 'movie':
# load in AE decoder
if hparams.get('ae_model_path', None) is not None:
ae_model_file = os.path.join(hparams['ae_model_path'], 'best_val_model.pt')
ae_arch = pickle.load(
open(os.path.join(hparams['ae_model_path'], 'meta_tags.pkl'), 'rb'))
else:
hparams['session_dir'], sess_ids = get_session_dir(
hparams, session_source=hparams.get('all_source', 'save'))
hparams['expt_dir'] = get_expt_dir(hparams)
_, latents_file = get_transforms_paths('ae_latents', hparams, sess_ids[sess_idx])
ae_model_file = os.path.join(os.path.dirname(latents_file), 'best_val_model.pt')
ae_arch = pickle.load(
open(os.path.join(os.path.dirname(latents_file), 'meta_tags.pkl'), 'rb'))
print('loading model from %s' % ae_model_file)
ae_model = AE(ae_arch)
ae_model.load_state_dict(
torch.load(ae_model_file, map_location=lambda storage, loc: storage))
ae_model.eval()
n_channels = ae_model.hparams['n_input_channels']
y_pix = ae_model.hparams['y_pixels']
x_pix = ae_model.hparams['x_pixels']
# push observed latents through ae decoder
ims_recon = np.zeros((0, n_channels * y_pix, x_pix))
i_trial = 0
while ims_recon.shape[0] < max_frames:
recon = ae_model.decoding(
torch.tensor(model_output['latents'][dtype][i_trial]).float(), None, None). \
cpu().detach().numpy()
recon = np.concatenate([recon[:, i] for i in range(recon.shape[1])], axis=1)
zero_frames = np.zeros((n_buffer, n_channels * y_pix, x_pix)) # add a few black frames
ims_recon = np.concatenate((ims_recon, recon, zero_frames), axis=0)
i_trial += 1
# push sampled latents through ae decoder
ims_recon_samp = np.zeros((0, n_channels * y_pix, x_pix))
i_trial = 0
while ims_recon_samp.shape[0] < max_frames:
recon = ae_model.decoding(torch.tensor(
model_output['latents_gen'][i_trial]).float(), None, None).cpu().detach().numpy()
recon = np.concatenate([recon[:, i] for i in range(recon.shape[1])], axis=1)
zero_frames = np.zeros((n_buffer, n_channels * y_pix, x_pix)) # add a few black frames
ims_recon_samp = np.concatenate((ims_recon_samp, recon, zero_frames), axis=0)
i_trial += 1
make_real_vs_sampled_movies(
ims_recon, ims_recon_samp, conditional=conditional, save_file=save_file,
frame_rate=frame_rate)
if output_type == 'both' or output_type == 'plot':
i_trial = 0
latents = model_output['latents'][dtype][i_trial][:max_frames]
states = model_output['states'][dtype][i_trial][:max_frames]
latents_samp = model_output['latents_gen'][i_trial][:max_frames]
if not conditional:
states_samp = model_output['states_gen'][i_trial][:max_frames]
else:
states_samp = []
fig = plot_real_vs_sampled(
latents, latents_samp, states, states_samp, save_file=save_file, xtick_locs=xtick_locs,
frame_rate=hparams['frame_rate'] if frame_rate_beh is None else frame_rate_beh,
format=format)
if output_type == 'movie':
return None
elif output_type == 'both' or output_type == 'plot':
return fig
else:
raise ValueError('"%s" is an invalid output_type' % output_type)
def make_real_vs_sampled_movies(
ims_recon, ims_recon_samp, conditional, save_file=None, frame_rate=15):
"""Produce movie with (AE) reconstructed video and sampled video.
Parameters
----------
ims_recon : :obj:`np.ndarray`
shape (n_frames, y_pix, x_pix)
ims_recon_samp : :obj:`np.ndarray`
shape (n_frames, y_pix, x_pix)
conditional : :obj:`bool`
conditional vs unconditional samples; for creating reconstruction title
save_file : :obj:`str`, optional
full save file (path and filename)
frame_rate : :obj:`float`, optional
frame rate of saved movie
"""
n_frames = ims_recon.shape[0]
n_plots = 2
[y_pix, x_pix] = ims_recon[0].shape
fig_dim_div = x_pix * n_plots / 10 # aiming for dim 1 being 10
x_dim = x_pix * n_plots / fig_dim_div
y_dim = y_pix / fig_dim_div
fig, axes = plt.subplots(1, n_plots, figsize=(x_dim, y_dim))
for j in range(2):
axes[j].set_xticks([])
axes[j].set_yticks([])
axes[0].set_title('Real Reconstructions\n', fontsize=16)
if conditional:
title_str = 'Generative Reconstructions\n(Conditional)'
else:
title_str = 'Generative Reconstructions\n(Unconditional)'
axes[1].set_title(title_str, fontsize=16)
fig.tight_layout(pad=0)
im_kwargs = {'cmap': 'gray', 'vmin': 0, 'vmax': 1, 'animated': True}
ims = []
for i in range(n_frames):
ims_curr = []
im = axes[0].imshow(ims_recon[i], **im_kwargs)
ims_curr.append(im)
im = axes[1].imshow(ims_recon_samp[i], **im_kwargs)
ims_curr.append(im)
ims.append(ims_curr)
ani = animation.ArtistAnimation(fig, ims, blit=True, repeat_delay=1000)
save_movie(save_file, ani, frame_rate=frame_rate)
def plot_real_vs_sampled(
latents, latents_samp, states, states_samp, save_file=None, xtick_locs=None,
frame_rate=None, format='png'):
"""Plot real and sampled latents overlaying real and (potentially sampled) states.
Parameters
----------
latents : :obj:`np.ndarray`
shape (n_frames, n_latents)
latents_samp : :obj:`np.ndarray`
shape (n_frames, n_latents)
states : :obj:`np.ndarray`
shape (n_frames,)
states_samp : :obj:`np.ndarray`
shape (n_frames,) if :obj:`latents_samp` are not conditioned on :obj:`states`, otherwise
shape (0,)
save_file : :obj:`str`
full save file (path and filename)
xtick_locs : :obj:`array-like`, optional
tick locations in bin values for plot
frame_rate : :obj:`float`, optional
behavioral video framerate; to properly relabel xticks
format : :obj:`str`, optional
any accepted matplotlib save format, e.g. 'png' | 'pdf' | 'jpeg'
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle
"""
fig, axes = plt.subplots(2, 1, figsize=(10, 8))
# plot observations
axes[0] = plot_states_overlaid_with_latents(
latents, states, ax=axes[0], xtick_locs=xtick_locs, frame_rate=frame_rate)
axes[0].set_xticks([])
axes[0].set_xlabel('')
axes[0].set_title('Inferred latents')
# plot samples
if len(states_samp) == 0:
plot_states = states
title_str = 'Sampled latents'
else:
plot_states = states_samp
title_str = 'Sampled states and latents'
axes[1] = plot_states_overlaid_with_latents(
latents_samp, plot_states, ax=axes[1], xtick_locs=xtick_locs, frame_rate=frame_rate)
axes[1].set_title(title_str)
if save_file is not None:
make_dir_if_not_exists(save_file)
plt.savefig(save_file + '.' + format, dpi=300, format=format)
return fig
def plot_states_overlaid_with_latents(
latents, states, save_file=None, ax=None, xtick_locs=None, frame_rate=None, cmap='tab20b',
format='png'):
"""Plot states for a single trial overlaid with latents.
Parameters
----------
latents : :obj:`np.ndarray`
shape (n_frames, n_latents)
states : :obj:`np.ndarray`
shape (n_frames,)
save_file : :obj:`str`, optional
full save file (path and filename)
ax : :obj:`matplotlib.Axes` object or :obj:`NoneType`, optional
axes to plot into; if :obj:`NoneType`, a new figure is created
xtick_locs : :obj:`array-like`, optional
tick locations in bin values for plot
frame_rate : :obj:`float`, optional
behavioral video framerate; to properly relabel xticks
cmap : :obj:`str`, optional
matplotlib colormap
format : :obj:`str`, optional
any accepted matplotlib save format, e.g. 'png' | 'pdf' | 'jpeg'
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle if :obj:`ax=None`, otherwise updated axis
"""
if ax is None:
fig = plt.figure(figsize=(8, 4))
ax = fig.gca()
else:
fig = None
spc = 1.1 * abs(latents.max())
n_latents = latents.shape[1]
plotting_latents = latents + spc * np.arange(n_latents)
ymin = min(-spc, np.min(plotting_latents))
ymax = max(spc * n_latents, np.max(plotting_latents))
ax.imshow(
states[None, :], aspect='auto', extent=(0, len(latents), ymin, ymax), cmap=cmap,
alpha=1.0)
ax.plot(plotting_latents, '-k', lw=3)
ax.set_ylim([ymin, ymax])
# yticks = spc * np.arange(n_latents)
# ax.set_yticks(yticks[::2])
# ax.set_yticklabels(np.arange(n_latents)[::2])
ax.set_yticks([])
# ax.set_ylabel('Latent')
ax.set_xlabel('Time (bins)')
if xtick_locs is not None:
ax.set_xticks(xtick_locs)
if frame_rate is not None:
ax.set_xticklabels((np.asarray(xtick_locs) / frame_rate).astype('int'))
ax.set_xlabel('Time (sec)')
if save_file is not None:
make_dir_if_not_exists(save_file)
plt.savefig(save_file + '.' + format, dpi=300, format=format)
if fig is None:
return ax
else:
return fig
def plot_state_transition_matrix(model, deridge=False):
"""Plot Markov transition matrix for arhmm.
Parameters
----------
model : :obj:`ssm.HMM` object
deridge : :obj:`bool`, optional
remove diagonal to more clearly see dynamic range of off-diagonal entries
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle
"""
trans = np.copy(model.transitions.transition_matrix)
if deridge:
n_states = trans.shape[0]
for i in range(n_states):
trans[i, i] = np.nan
clim = np.nanmax(np.abs(trans))
else:
clim = 1
fig = plt.figure()
plt.imshow(trans, clim=[-clim, clim], cmap='RdBu_r')
plt.colorbar()
plt.ylabel('State (t)')
plt.xlabel('State (t+1)')
plt.title('State transition matrix')
plt.show()
return fig
def plot_dynamics_matrices(model, deridge=False):
"""Plot autoregressive dynamics matrices for arhmm.
Parameters
----------
model : :obj:`ssm.HMM` object
deridge : :obj:`bool`, optional
remove diagonal to more clearly see dynamic range of off-diagonal entries
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle
"""
K = model.K
D = model.D
n_lags = model.observations.lags
if n_lags == 1:
n_cols = 3
fac = 1
elif n_lags == 2:
n_cols = 3
fac = 1 / n_lags
elif n_lags == 3:
n_cols = 3
fac = 1.25 / n_lags
elif n_lags == 4:
n_cols = 3
fac = 1.50 / n_lags
elif n_lags == 5:
n_cols = 2
fac = 1.75 / n_lags
else:
n_cols = 1
fac = 1
n_rows = int(np.ceil(K / n_cols))
fig = plt.figure(figsize=(4 * n_cols, 4 * n_rows * fac))
mats = np.copy(model.observations.As)
if deridge:
for k in range(K):
for d in range(model.D):
mats[k, d, d] = np.nan
clim = np.nanmax(np.abs(mats))
else:
clim = np.max(np.abs(mats))
for k in range(K):
plt.subplot(n_rows, n_cols, k + 1)
im = plt.imshow(mats[k], cmap='RdBu_r', clim=[-clim, clim])
for lag in range(n_lags - 1):
plt.axvline((lag + 1) * D - 0.5, ymin=0, ymax=K, color=[0, 0, 0])
plt.xticks([])
plt.yticks([])
plt.title('State %i' % k)
plt.tight_layout()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.4, 0.03, 0.2])
fig.colorbar(im, cax=cbar_ax)
# plt.suptitle('Dynamics matrices')
return fig
def plot_obs_biases(model):
"""Plot observation bias vectors for arhmm.
Parameters
----------
model : :obj:`ssm.HMM` object
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle
"""
fig = plt.figure(figsize=(6, 4))
mats = np.copy(model.observations.bs.T)
clim = np.max(np.abs(mats))
plt.imshow(mats, cmap='RdBu_r', clim=[-clim, clim], aspect='auto')
plt.xlabel('State')
plt.yticks([])
plt.ylabel('Observation dimension')
plt.tight_layout()
plt.colorbar()
plt.title('State biases')
plt.show()
return fig
def plot_obs_covariance_matrices(model):
"""Plot observation covariance matrices for arhmm.
Parameters
----------
model : :obj:`ssm.HMM` object
Returns
-------
:obj:`matplotlib.figure.Figure`
matplotlib figure handle
"""
K = model.K
n_cols = int(np.sqrt(K))
n_rows = int(np.ceil(K / n_cols))
fig = plt.figure(figsize=(3 * n_cols, 3 * n_rows))
mats = np.copy(model.observations.Sigmas)
clim = np.quantile(np.abs(mats), 0.95)
for k in range(K):
plt.subplot(n_rows, n_cols, k + 1)
im = plt.imshow(mats[k], cmap='RdBu_r', clim=[-clim, clim])
plt.xticks([])
plt.yticks([])
plt.title('State %i' % k)
plt.tight_layout()
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.4, 0.03, 0.2])
fig.colorbar(im, cax=cbar_ax)
return fig
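# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): how the ARHMM
# plotting helpers above might be chained after fitting a model. The caller
# is assumed to supply a fitted ssm.HMM with AR observations plus single-trial
# latents/states; only the plotting calls mirror the signatures defined above.
def _example_plot_arhmm_summaries(model, latents, states):
    """Assumes `latents` has shape (n_frames, n_latents) and `states` has
    shape (n_frames,) for one trial."""
    fig_states = plot_states_overlaid_with_latents(
        latents, states, xtick_locs=None, frame_rate=None)
    fig_trans = plot_state_transition_matrix(model, deridge=True)
    fig_dyn = plot_dynamics_matrices(model, deridge=False)
    fig_bias = plot_obs_biases(model)
    fig_cov = plot_obs_covariance_matrices(model)
    return fig_states, fig_trans, fig_dyn, fig_bias, fig_cov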
```
#### File: behavenet/plotting/cond_ae_utils.py
```python
import os
import copy
import pickle
import numpy as np
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import torch
from tqdm import tqdm
from behavenet import get_user_dir
from behavenet import make_dir_if_not_exists
from behavenet.data.utils import build_data_generator
from behavenet.data.utils import load_labels_like_latents
from behavenet.fitting.eval import get_reconstruction
from behavenet.fitting.utils import experiment_exists
from behavenet.fitting.utils import get_best_model_and_data
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_lab_example
from behavenet.fitting.utils import get_session_dir
from behavenet.plotting import concat
from behavenet.plotting import get_crop
from behavenet.plotting import load_latents
from behavenet.plotting import load_metrics_csv_as_df
from behavenet.plotting import save_movie
# to ignore imports for sphinx-autoapidoc
__all__ = [
'get_input_range', 'compute_range', 'get_labels_2d_for_trial', 'get_model_input',
'interpolate_2d', 'interpolate_1d', 'interpolate_point_path', 'plot_2d_frame_array',
'plot_1d_frame_array', 'make_interpolated', 'make_interpolated_multipanel',
'plot_psvae_training_curves', 'plot_hyperparameter_search_results',
'plot_label_reconstructions', 'plot_latent_traversals', 'make_latent_traversal_movie']
# ----------------------------------------
# low-level util functions
# ----------------------------------------
def get_input_range(
input_type, hparams, sess_ids=None, sess_idx=0, model=None, data_gen=None, version=0,
min_p=5, max_p=95, apply_label_masks=False):
"""Helper function to compute input range for a variety of data types.
Parameters
----------
input_type : :obj:`str`
'latents' | 'labels' | 'labels_sc'
hparams : :obj:`dict`
needs to contain enough information to specify an autoencoder
sess_ids : :obj:`list`, optional
each entry is a session dict with keys 'lab', 'expt', 'animal', 'session'; for loading
labels and labels_sc
sess_idx : :obj:`int`, optional
session index into data generator
model : :obj:`AE` object, optional
for generating latents if latent file does not exist
data_gen : :obj:`ConcatSessionGenerator` object, optional
for generating latents if latent file does not exist
version : :obj:`int`, optional
specify AE version for loading latents
min_p : :obj:`int`, optional
defines lower end of range; percentile in [0, 100]
max_p : :obj:`int`, optional
defines upper end of range; percentile in [0, 100]
apply_label_masks : :obj:`bool`, optional
`True` to set masked values to NaN in labels
Returns
-------
:obj:`dict`
keys are 'min' and 'max'
"""
if input_type == 'latents':
# load latents
latent_file = str('%s_%s_%s_%s_latents.pkl' % (
hparams['lab'], hparams['expt'], hparams['animal'], hparams['session']))
filename = os.path.join(
hparams['expt_dir'], 'version_%i' % version, latent_file)
if not os.path.exists(filename):
from behavenet.fitting.eval import export_latents
print('latents file not found at %s' % filename)
print('exporting latents...', end='')
filenames = export_latents(data_gen, model)
filename = filenames[0]
print('done')
latents = pickle.load(open(filename, 'rb'))
inputs = latents['latents']
elif input_type == 'labels':
labels = load_labels_like_latents(hparams, sess_ids, sess_idx=sess_idx)
inputs = labels['latents']
elif input_type == 'labels_sc':
hparams2 = copy.deepcopy(hparams)
hparams2['conditional_encoder'] = True # to actually return labels
labels_sc = load_labels_like_latents(
hparams2, sess_ids, sess_idx=sess_idx, data_key='labels_sc')
inputs = labels_sc['latents']
else:
raise NotImplementedError
if apply_label_masks:
masks = load_labels_like_latents(
hparams, sess_ids, sess_idx=sess_idx, data_key='labels_masks')
for i, m in zip(inputs, masks):
i[m == 0] = np.nan
input_range = compute_range(inputs, min_p=min_p, max_p=max_p)
return input_range
def compute_range(values_list, min_p=5, max_p=95):
"""Compute min and max of a list of numbers using percentiles.
Parameters
----------
values_list : :obj:`list`
list of np.ndarrays; min/max calculated over axis 0 once all lists are vertically stacked
min_p : :obj:`int`
defines lower end of range; percentile in [0, 100]
max_p : :obj:`int`
defines upper end of range; percentile in [0, 100]
Returns
-------
:obj:`dict`
lower ['min'] and upper ['max'] range of input
"""
if np.any([len(arr) == 0 for arr in values_list]):
values_ = []
for arr in values_list:
if len(arr) != 0:
values_.append(arr)
values = np.vstack(values_)
else:
values = np.vstack(values_list)
ranges = {
'min': np.nanpercentile(values, min_p, axis=0),
'max': np.nanpercentile(values, max_p, axis=0)}
return ranges
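# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): `compute_range`
# stacks a list of per-trial arrays and takes per-dimension percentiles.
# The toy arrays below are assumptions for illustration.
def _example_compute_range():
    import numpy as np
    trial_1 = np.random.randn(100, 3)   # (n_frames, n_dims)
    trial_2 = np.random.randn(80, 3)
    ranges = compute_range([trial_1, trial_2], min_p=5, max_p=95)
    # ranges['min'] and ranges['max'] each have shape (3,)
    return ranges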
def get_labels_2d_for_trial(
hparams, sess_ids, trial=None, trial_idx=None, sess_idx=0, dtype='test', data_gen=None):
"""Return scaled labels (in pixel space) for a given trial.
Parameters
----------
hparams : :obj:`dict`
needs to contain enough information to build a data generator
sess_ids : :obj:`list` of :obj:`dict`
each entry is a session dict with keys 'lab', 'expt', 'animal', 'session'
trial : :obj:`int`, optional
trial index into all possible trials (train, val, test); one of `trial` or `trial_idx`
must be specified; `trial` takes precedence over `trial_idx`
trial_idx : :obj:`int`, optional
trial index into trial type defined by `dtype`; one of `trial` or `trial_idx` must be
specified; `trial` takes precedence over `trial_idx`
sess_idx : :obj:`int`, optional
session index into data generator
dtype : :obj:`str`, optional
data type that is indexed by `trial_idx`; 'train' | 'val' | 'test'
data_gen : :obj:`ConcatSessionGenerator` object, optional
for generating labels
Returns
-------
:obj:`tuple`
- labels_2d_pt (:obj:`torch.Tensor`) of shape (batch, n_labels, y_pix, x_pix)
- labels_2d_np (:obj:`np.ndarray`) of shape (batch, n_labels, y_pix, x_pix)
"""
if (trial_idx is not None) and (trial is not None):
raise ValueError('only one of "trial" or "trial_idx" can be specified')
if data_gen is None:
hparams_new = copy.deepcopy(hparams)
hparams_new['conditional_encoder'] = True # ensure scaled labels are returned
hparams_new['device'] = 'cpu'
hparams_new['as_numpy'] = False
hparams_new['batch_load'] = True
data_gen = build_data_generator(hparams_new, sess_ids, export_csv=False)
# get trial
if trial is None:
trial = data_gen.datasets[sess_idx].batch_idxs[dtype][trial_idx]
batch = data_gen.datasets[sess_idx][trial]
labels_2d_pt = batch['labels_sc']
labels_2d_np = labels_2d_pt.cpu().detach().numpy()
return labels_2d_pt, labels_2d_np
def get_model_input(
data_generator, hparams, model, trial=None, trial_idx=None, sess_idx=0, max_frames=200,
compute_latents=False, compute_2d_labels=True, compute_scaled_labels=False, dtype='test'):
"""Return images, latents, and labels for a given trial.
Parameters
----------
data_generator: :obj:`ConcatSessionGenerator`
for generating model input
hparams : :obj:`dict`
needs to contain enough information to specify both a model and the associated data
model : :obj:`behavenet.models` object
model type
trial : :obj:`int`, optional
trial index into all possible trials (train, val, test); one of `trial` or `trial_idx`
must be specified; `trial` takes precedence over `trial_idx`
trial_idx : :obj:`int`, optional
trial index into trial type defined by `dtype`; one of `trial` or `trial_idx` must be
specified; `trial` takes precedence over `trial_idx`
sess_idx : :obj:`int`, optional
session index into data generator
max_frames : :obj:`int`, optional
maximum size of batch to return
compute_latents : :obj:`bool`, optional
`True` to return latents
compute_2d_labels : :obj:`bool`, optional
`True` to return 2d label tensors of shape (batch, n_labels, y_pix, x_pix)
compute_scaled_labels : :obj:`bool`, optional
ignored if `compute_2d_labels` is `True`; if `compute_scaled_labels=True`, return scaled
labels as shape (batch, n_labels) rather than 2d labels as shape
(batch, n_labels, y_pix, x_pix).
dtype : :obj:`str`, optional
data type that is indexed by `trial_idx`; 'train' | 'val' | 'test'
Returns
-------
:obj:`tuple`
- ims_pt (:obj:`torch.Tensor`) of shape (max_frames, n_channels, y_pix, x_pix)
- ims_np (:obj:`np.ndarray`) of shape (max_frames, n_channels, y_pix, x_pix)
- latents_np (:obj:`np.ndarray`) of shape (max_frames, n_latents)
- labels_pt (:obj:`torch.Tensor`) of shape (max_frames, n_labels)
- labels_2d_pt (:obj:`torch.Tensor`) of shape (max_frames, n_labels, y_pix, x_pix)
- labels_2d_np (:obj:`np.ndarray`) of shape (max_frames, n_labels, y_pix, x_pix)
"""
if (trial_idx is not None) and (trial is not None):
raise ValueError('only one of "trial" or "trial_idx" can be specified')
if (trial_idx is None) and (trial is None):
raise ValueError('one of "trial" or "trial_idx" must be specified')
# get trial
if trial is None:
trial = data_generator.datasets[sess_idx].batch_idxs[dtype][trial_idx]
batch = data_generator.datasets[sess_idx][trial]
ims_pt = batch['images'][:max_frames]
ims_np = ims_pt.cpu().detach().numpy()
# continuous labels
if hparams['model_class'] == 'ae' \
or hparams['model_class'] == 'vae' \
or hparams['model_class'] == 'beta-tcvae':
labels_pt = None
labels_np = None
elif hparams['model_class'] == 'cond-ae' \
or hparams['model_class'] == 'cond-vae' \
or hparams['model_class'] == 'cond-ae-msp' \
or hparams['model_class'] == 'ps-vae' \
or hparams['model_class'] == 'labels-images':
labels_pt = batch['labels'][:max_frames]
labels_np = labels_pt.cpu().detach().numpy()
else:
raise NotImplementedError
# one hot labels
if hparams['conditional_encoder']:
labels_2d_pt = batch['labels_sc'][:max_frames]
labels_2d_np = labels_2d_pt.cpu().detach().numpy()
else:
if compute_2d_labels:
hparams['session_dir'], sess_ids = get_session_dir(hparams)
labels_2d_pt, labels_2d_np = get_labels_2d_for_trial(hparams, sess_ids, trial=trial)
elif compute_scaled_labels:
labels_2d_pt = None
import h5py
hdf5_file = data_generator.datasets[sess_idx].paths['labels']
with h5py.File(hdf5_file, 'r', libver='latest', swmr=True) as f:
labels_2d_np = f['labels_sc'][str('trial_%04i' % trial)][()].astype('float32')
else:
labels_2d_pt, labels_2d_np = None, None
# latents
if compute_latents:
if hparams['model_class'] == 'cond-ae-msp' or hparams['model_class'] == 'ps-vae':
latents_np = model.get_transformed_latents(ims_pt, dataset=sess_idx, as_numpy=True)
else:
_, latents_np = get_reconstruction(
model, ims_pt, labels=labels_pt, labels_2d=labels_2d_pt, return_latents=True)
else:
latents_np = None
return ims_pt, ims_np, latents_np, labels_pt, labels_np, labels_2d_pt, labels_2d_np
def interpolate_2d(
interp_type, model, ims_0, latents_0, labels_0, labels_sc_0, mins, maxes, input_idxs,
n_frames, crop_type=None, mins_sc=None, maxes_sc=None, crop_kwargs=None,
marker_idxs=None, ch=0):
"""Return reconstructed images created by interpolating through latent/label space.
Parameters
----------
interp_type : :obj:`str`
'latents' | 'labels'
model : :obj:`behavenet.models` object
autoencoder model
ims_0 : :obj:`torch.Tensor`
base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
latents_0 : :obj:`np.ndarray`
base latents of shape (1, n_latents); only two of these dimensions will be changed if
`interp_type='latents'`
labels_0 : :obj:`np.ndarray`
base labels of shape (1, n_labels)
labels_sc_0 : :obj:`np.ndarray`
base scaled labels in pixel space of shape (1, n_labels, y_pix, x_pix)
mins : :obj:`array-like`
minimum values of labels/latents, one for each dim
maxes : :obj:`list`
maximum values of labels/latents, one for each dim
input_idxs : :obj:`list`
indices of labels/latents that will be interpolated; for labels, must be y first, then x
for proper marker recording
n_frames : :obj:`int`
number of interpolation points between mins and maxes (inclusive)
crop_type : :obj:`str` or :obj:`NoneType`, optional
currently only implements 'fixed'; if not None, cropped images are returned, and returned
labels are also cropped so that they can be plotted on top of the cropped images; if None,
returned cropped images are empty and labels are relative to original image size
mins_sc : :obj:`list`, optional
min values of scaled labels that correspond to min values of labels when using conditional
encoders
maxes_sc : :obj:`list`, optional
max values of scaled labels that correspond to max values of labels when using conditional
encoders
crop_kwargs : :obj:`dict`, optional
define center and extent of crop if `crop_type='fixed'`; keys are 'x_0', 'x_ext', 'y_0',
'y_ext'
marker_idxs : :obj:`list`, optional
indices of `labels_sc_0` that will be interpolated; note that this is analogous but
different from `input_idxs`, since the 2d tensor `labels_sc_0` has half as many label
dimensions as `latents_0` and `labels_0`
ch : :obj:`int`, optional
specify which channel of input images to return (can only be a single value)
Returns
-------
:obj:`tuple`
- ims_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated images
- labels_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated labels
        - ims_crop_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated, cropped
images
"""
if interp_type == 'labels':
from behavenet.data.transforms import MakeOneHot2D
_, _, y_pix, x_pix = ims_0.shape
one_hot_2d = MakeOneHot2D(y_pix, x_pix)
# compute grid for relevant inputs
n_interp_dims = len(input_idxs)
assert n_interp_dims == 2
# compute ranges for relevant inputs
inputs = []
inputs_sc = []
for d in input_idxs:
inputs.append(np.linspace(mins[d], maxes[d], n_frames))
if mins_sc is not None and maxes_sc is not None:
inputs_sc.append(np.linspace(mins_sc[d], maxes_sc[d], n_frames))
else:
if interp_type == 'labels':
raise NotImplementedError
ims_list = []
ims_crop_list = []
labels_list = []
# latent_vals = []
for i0 in range(n_frames):
ims_tmp = []
ims_crop_tmp = []
labels_tmp = []
# latents_tmp = []
for i1 in range(n_frames):
if interp_type == 'latents':
# get (new) latents
latents = np.copy(latents_0)
latents[0, input_idxs[0]] = inputs[0][i0]
latents[0, input_idxs[1]] = inputs[1][i1]
# get scaled labels (for markers)
labels_sc = _get_updated_scaled_labels(labels_sc_0)
if model.hparams['model_class'] == 'cond-ae-msp':
# get reconstruction
im_tmp = get_reconstruction(
model,
torch.from_numpy(latents).float(),
apply_inverse_transform=True)
else:
# get labels
if model.hparams['model_class'] == 'ae' \
or model.hparams['model_class'] == 'vae' \
or model.hparams['model_class'] == 'beta-tcvae' \
or model.hparams['model_class'] == 'ps-vae':
labels = None
elif model.hparams['model_class'] == 'cond-ae' \
or model.hparams['model_class'] == 'cond-vae':
labels = torch.from_numpy(labels_0).float()
else:
raise NotImplementedError
# get reconstruction
im_tmp = get_reconstruction(
model,
torch.from_numpy(latents).float(),
labels=labels)
elif interp_type == 'labels':
# get (new) scaled labels
labels_sc = _get_updated_scaled_labels(
labels_sc_0, input_idxs, [inputs_sc[0][i0], inputs_sc[1][i1]])
if len(labels_sc_0.shape) == 4:
# 2d scaled labels
labels_2d = torch.from_numpy(one_hot_2d(labels_sc)).float()
else:
# 1d scaled labels
labels_2d = None
if model.hparams['model_class'] == 'cond-ae-msp' \
or model.hparams['model_class'] == 'ps-vae':
# change latents that correspond to desired labels
latents = np.copy(latents_0)
latents[0, input_idxs[0]] = inputs[0][i0]
latents[0, input_idxs[1]] = inputs[1][i1]
# get reconstruction
im_tmp = get_reconstruction(model, latents, apply_inverse_transform=True)
else:
# get (new) labels
labels = np.copy(labels_0)
labels[0, input_idxs[0]] = inputs[0][i0]
labels[0, input_idxs[1]] = inputs[1][i1]
# get reconstruction
im_tmp = get_reconstruction(
model,
ims_0,
labels=torch.from_numpy(labels).float(),
labels_2d=labels_2d)
else:
raise NotImplementedError
ims_tmp.append(np.copy(im_tmp[0, ch]))
if crop_type:
x_min_tmp = crop_kwargs['x_0'] - crop_kwargs['x_ext']
y_min_tmp = crop_kwargs['y_0'] - crop_kwargs['y_ext']
else:
x_min_tmp = 0
y_min_tmp = 0
if interp_type == 'labels':
labels_tmp.append([
np.copy(labels_sc[0, input_idxs[0]]) - y_min_tmp,
np.copy(labels_sc[0, input_idxs[1]]) - x_min_tmp])
elif interp_type == 'latents' and labels_sc_0 is not None:
labels_tmp.append([
np.copy(labels_sc[0, marker_idxs[0]]) - y_min_tmp,
np.copy(labels_sc[0, marker_idxs[1]]) - x_min_tmp])
else:
labels_tmp.append([np.nan, np.nan])
if crop_type:
ims_crop_tmp.append(get_crop(
im_tmp[0, 0], crop_kwargs['y_0'], crop_kwargs['y_ext'], crop_kwargs['x_0'],
crop_kwargs['x_ext']))
else:
ims_crop_tmp.append([])
ims_list.append(ims_tmp)
ims_crop_list.append(ims_crop_tmp)
labels_list.append(labels_tmp)
return ims_list, labels_list, ims_crop_list
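# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the core of
# `interpolate_2d` is a grid of candidate latent vectors built from
# per-dimension linspaces over the two selected latent/label dimensions.
# The helper below constructs only that grid; the name and toy inputs are
# assumptions for illustration.
def _example_latent_grid(latents_0, mins, maxes, input_idxs, n_frames):
    import numpy as np
    d0, d1 = input_idxs
    vals_0 = np.linspace(mins[d0], maxes[d0], n_frames)
    vals_1 = np.linspace(mins[d1], maxes[d1], n_frames)
    grid = []
    for v0 in vals_0:
        row = []
        for v1 in vals_1:
            latents = np.copy(latents_0)   # latents_0 has shape (1, n_latents)
            latents[0, d0] = v0
            latents[0, d1] = v1
            row.append(latents)
        grid.append(row)
    return grid                            # n_frames x n_frames grid of latent vectors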
def interpolate_1d(
interp_type, model, ims_0, latents_0, labels_0, labels_sc_0, mins, maxes, input_idxs,
n_frames, crop_type=None, mins_sc=None, maxes_sc=None, crop_kwargs=None,
marker_idxs=None, ch=0):
"""Return reconstructed images created by interpolating through latent/label space.
Parameters
----------
interp_type : :obj:`str`
'latents' | 'labels'
model : :obj:`behavenet.models` object
autoencoder model
ims_0 : :obj:`torch.Tensor`
base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
latents_0 : :obj:`np.ndarray`
base latents of shape (1, n_latents); only two of these dimensions will be changed if
`interp_type='latents'`
labels_0 : :obj:`np.ndarray`
base labels of shape (1, n_labels)
labels_sc_0 : :obj:`np.ndarray`
base scaled labels in pixel space of shape (1, n_labels, y_pix, x_pix)
mins : :obj:`array-like`
minimum values of all labels/latents
maxes : :obj:`array-like`
maximum values of all labels/latents
input_idxs : :obj:`array-like`
indices of labels/latents that will be interpolated
n_frames : :obj:`int`
number of interpolation points between mins and maxes (inclusive)
crop_type : :obj:`str` or :obj:`NoneType`, optional
currently only implements 'fixed'; if not None, cropped images are returned, and returned
labels are also cropped so that they can be plotted on top of the cropped images; if None,
returned cropped images are empty and labels are relative to original image size
mins_sc : :obj:`list`, optional
min values of scaled labels that correspond to min values of labels when using conditional
encoders
maxes_sc : :obj:`list`, optional
max values of scaled labels that correspond to max values of labels when using conditional
encoders
crop_kwargs : :obj:`dict`, optional
define center and extent of crop if `crop_type='fixed'`; keys are 'x_0', 'x_ext', 'y_0',
'y_ext'
marker_idxs : :obj:`list`, optional
indices of `labels_sc_0` that will be interpolated; note that this is analogous but
different from `input_idxs`, since the 2d tensor `labels_sc_0` has half as many label
dimensions as `latents_0` and `labels_0`
ch : :obj:`int`, optional
specify which channel of input images to return (can only be a single value)
Returns
-------
:obj:`tuple`
- ims_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated images
- labels_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated labels
        - ims_crop_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated, cropped
images
"""
if interp_type == 'labels':
from behavenet.data.transforms import MakeOneHot2D
_, _, y_pix, x_pix = ims_0.shape
one_hot_2d = MakeOneHot2D(y_pix, x_pix)
n_interp_dims = len(input_idxs)
# compute ranges for relevant inputs
inputs = []
inputs_sc = []
for d in input_idxs:
inputs.append(np.linspace(mins[d], maxes[d], n_frames))
if mins_sc is not None and maxes_sc is not None:
inputs_sc.append(np.linspace(mins_sc[d], maxes_sc[d], n_frames))
else:
if interp_type == 'labels':
raise NotImplementedError
ims_list = []
ims_crop_list = []
labels_list = []
# latent_vals = []
for i0 in range(n_interp_dims):
ims_tmp = []
ims_crop_tmp = []
labels_tmp = []
for i1 in range(n_frames):
if interp_type == 'latents':
# get (new) latents
latents = np.copy(latents_0)
latents[0, input_idxs[i0]] = inputs[i0][i1]
# get scaled labels (for markers)
labels_sc = _get_updated_scaled_labels(labels_sc_0)
if model.hparams['model_class'] == 'cond-ae-msp':
# get reconstruction
im_tmp = get_reconstruction(
model,
torch.from_numpy(latents).float(),
apply_inverse_transform=True)
else:
# get labels
if model.hparams['model_class'] == 'ae' \
or model.hparams['model_class'] == 'vae' \
or model.hparams['model_class'] == 'beta-tcvae' \
or model.hparams['model_class'] == 'ps-vae':
labels = None
elif model.hparams['model_class'] == 'cond-ae' \
or model.hparams['model_class'] == 'cond-vae':
labels = torch.from_numpy(labels_0).float()
else:
raise NotImplementedError
# get reconstruction
im_tmp = get_reconstruction(
model,
torch.from_numpy(latents).float(),
labels=labels)
elif interp_type == 'labels':
# get (new) scaled labels
labels_sc = _get_updated_scaled_labels(
labels_sc_0, input_idxs[i0], inputs_sc[i0][i1])
if len(labels_sc_0.shape) == 4:
# 2d scaled labels
labels_2d = torch.from_numpy(one_hot_2d(labels_sc)).float()
else:
# 1d scaled labels
labels_2d = None
if model.hparams['model_class'] == 'cond-ae-msp' \
or model.hparams['model_class'] == 'ps-vae':
# change latents that correspond to desired labels
latents = np.copy(latents_0)
latents[0, input_idxs[i0]] = inputs[i0][i1]
# get reconstruction
im_tmp = get_reconstruction(model, latents, apply_inverse_transform=True)
else:
# get (new) labels
labels = np.copy(labels_0)
labels[0, input_idxs[i0]] = inputs[i0][i1]
# get reconstruction
im_tmp = get_reconstruction(
model,
ims_0,
labels=torch.from_numpy(labels).float(),
labels_2d=labels_2d)
else:
raise NotImplementedError
ims_tmp.append(np.copy(im_tmp[0, ch]))
if crop_type:
x_min_tmp = crop_kwargs['x_0'] - crop_kwargs['x_ext']
y_min_tmp = crop_kwargs['y_0'] - crop_kwargs['y_ext']
else:
x_min_tmp = 0
y_min_tmp = 0
if interp_type == 'labels':
labels_tmp.append([
np.copy(labels_sc[0, input_idxs[0]]) - y_min_tmp,
np.copy(labels_sc[0, input_idxs[1]]) - x_min_tmp])
elif interp_type == 'latents' and labels_sc_0 is not None:
labels_tmp.append([
np.copy(labels_sc[0, marker_idxs[0]]) - y_min_tmp,
np.copy(labels_sc[0, marker_idxs[1]]) - x_min_tmp])
else:
labels_tmp.append([np.nan, np.nan])
if crop_type:
ims_crop_tmp.append(get_crop(
im_tmp[0, 0], crop_kwargs['y_0'], crop_kwargs['y_ext'], crop_kwargs['x_0'],
crop_kwargs['x_ext']))
else:
ims_crop_tmp.append([])
ims_list.append(ims_tmp)
ims_crop_list.append(ims_crop_tmp)
labels_list.append(labels_tmp)
return ims_list, labels_list, ims_crop_list
def interpolate_point_path(
interp_type, model, ims_0, labels_0, points, n_frames=10, ch=0, crop_kwargs=None,
apply_inverse_transform=True):
"""Return reconstructed images created by interpolating through multiple points.
This function is a simplified version of :func:`interpolate_1d()`; this function computes a
traversal for a single dimension instead of all dimensions; also, this function does not
support conditional encoders, nor does it attempt to compute the interpolated, scaled values
    of the labels as :func:`interpolate_1d()` does. This function should supersede
:func:`interpolate_1d()` in a future refactor. Also note that this function is utilized by
the code to make traversal movies, whereas :func:`interpolate_1d()` is utilized by the code to
make traversal plots.
Parameters
----------
interp_type : :obj:`str`
'latents' | 'labels'
model : :obj:`behavenet.models` object
autoencoder model
ims_0 : :obj:`np.ndarray`
base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
labels_0 : :obj:`np.ndarray`
base labels of shape (1, n_labels); these values will be used if
        `interp_type='latents'`, and they will be ignored if `interp_type='labels'`
(since `points` will be used)
points : :obj:`list`
one entry for each point in path; each entry is an np.ndarray of shape (n_latents,)
n_frames : :obj:`int` or :obj:`array-like`
number of interpolation points between each point; can be an integer that is used
for all paths, or an array/list of length one less than number of points
ch : :obj:`int`, optional
specify which channel of input images to return; if not an int, all channels are
concatenated in the horizontal dimension
crop_kwargs : :obj:`dict`, optional
if crop_type is not None, provides information about the crop (for a fixed crop window)
keys : 'y_0', 'x_0', 'y_ext', 'x_ext'; window is
(y_0 - y_ext, y_0 + y_ext) in vertical direction and
(x_0 - x_ext, x_0 + x_ext) in horizontal direction
apply_inverse_transform : :obj:`bool`
if inputs are latents (and model class is 'cond-ae-msp' or 'ps-vae'), apply inverse
transform to put in original latent space
Returns
-------
:obj:`tuple`
- ims_list (:obj:`list` of :obj:`np.ndarray`) interpolated images
- inputs_list (:obj:`list` of :obj:`np.ndarray`) interpolated values
"""
if model.hparams.get('conditional_encoder', False):
raise NotImplementedError
n_points = len(points)
if isinstance(n_frames, int):
n_frames = [n_frames] * (n_points - 1)
assert len(n_frames) == (n_points - 1)
ims_list = []
inputs_list = []
for p in range(n_points - 1):
p0 = points[None, p]
p1 = points[None, p + 1]
p_vec = (p1 - p0) / n_frames[p]
for pn in range(n_frames[p]):
vec = p0 + pn * p_vec
if interp_type == 'latents':
if model.hparams['model_class'] == 'cond-ae' \
or model.hparams['model_class'] == 'cond-vae':
im_tmp = get_reconstruction(
model, vec, apply_inverse_transform=apply_inverse_transform,
labels=torch.from_numpy(labels_0).float().to(model.hparams['device']))
else:
im_tmp = get_reconstruction(
model, vec, apply_inverse_transform=apply_inverse_transform)
elif interp_type == 'labels':
if model.hparams['model_class'] == 'cond-ae-msp' \
or model.hparams['model_class'] == 'ps-vae':
im_tmp = get_reconstruction(
model, vec, apply_inverse_transform=True)
else: # cond-ae
im_tmp = get_reconstruction(
model, ims_0,
labels=torch.from_numpy(vec).float().to(model.hparams['device']))
else:
raise NotImplementedError
if crop_kwargs is not None:
if not isinstance(ch, int):
raise ValueError('"ch" must be an integer to use crop_kwargs')
ims_list.append(get_crop(
im_tmp[0, ch],
crop_kwargs['y_0'], crop_kwargs['y_ext'],
crop_kwargs['x_0'], crop_kwargs['x_ext']))
else:
if isinstance(ch, int):
ims_list.append(np.copy(im_tmp[0, ch]))
else:
ims_list.append(np.copy(concat(im_tmp[0])))
inputs_list.append(vec)
return ims_list, inputs_list
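# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the path traversal in
# `interpolate_point_path` is plain linear interpolation between consecutive
# anchor points, with `n_frames[p]` steps per segment. The helper below
# reproduces only that arithmetic; the name and inputs are assumptions for
# illustration.
def _example_point_path(points, n_frames):
    import numpy as np
    points = np.asarray(points)             # (n_points, n_dims)
    if isinstance(n_frames, int):
        n_frames = [n_frames] * (len(points) - 1)
    path = []
    for p in range(len(points) - 1):
        p0, p1 = points[None, p], points[None, p + 1]
        step = (p1 - p0) / n_frames[p]
        for pn in range(n_frames[p]):
            path.append(p0 + pn * step)     # the next segment starts at p1
    return path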
def _get_updated_scaled_labels(labels_og, idxs=None, vals=None):
"""Helper function for interpolate_xd functions."""
if labels_og is not None:
if len(labels_og.shape) == 4:
# 2d scaled labels
tmp = np.copy(labels_og)
t, y, x = np.where(tmp[0] == 1)
labels_sc = np.hstack([x, y])[None, :]
else:
# 1d scaled labels
labels_sc = np.copy(labels_og)
if idxs is not None:
if isinstance(idxs, int):
assert isinstance(vals, float)
idxs = [idxs]
vals = [vals]
else:
assert len(idxs) == len(vals)
for idx, val in zip(idxs, vals):
labels_sc[0, idx] = val
else:
labels_sc = None
return labels_sc
# ----------------------------------------
# mid-level plotting functions
# ----------------------------------------
def plot_2d_frame_array(
ims_list, markers=None, im_kwargs=None, marker_kwargs=None, figsize=None, save_file=None,
format='pdf'):
"""Plot list of list of interpolated images output by :func:`interpolate_2d()` in a 2d grid.
Parameters
----------
ims_list : :obj:`list` of :obj:`list`
each inner list element holds an np.ndarray of shape (y_pix, x_pix)
markers : :obj:`list` of :obj:`list` or NoneType, optional
each inner list element holds an array-like object with values (y_pix, x_pix); if None,
markers are not plotted on top of frames
im_kwargs : :obj:`dict` or NoneType, optional
kwargs for `matplotlib.pyplot.imshow()` function (vmin, vmax, cmap, etc)
marker_kwargs : :obj:`dict` or NoneType, optional
kwargs for `matplotlib.pyplot.plot()` function (markersize, markeredgewidth, etc)
figsize : :obj:`tuple`, optional
(width, height) in inches
save_file : :obj:`str` or NoneType, optional
figure saved if not None
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
"""
n_y = len(ims_list)
n_x = len(ims_list[0])
if figsize is None:
y_pix, x_pix = ims_list[0][0].shape
# how many inches per pixel?
in_per_pix = 15 / (x_pix * n_x)
figsize = (15, in_per_pix * y_pix * n_y)
fig, axes = plt.subplots(n_y, n_x, figsize=figsize)
if im_kwargs is None:
im_kwargs = {'vmin': 0, 'vmax': 1, 'cmap': 'gray'}
if marker_kwargs is None:
marker_kwargs = {'markersize': 20, 'markeredgewidth': 3}
for r, ims_list_y in enumerate(ims_list):
for c, im in enumerate(ims_list_y):
axes[r, c].imshow(im, **im_kwargs)
axes[r, c].set_xticks([])
axes[r, c].set_yticks([])
if markers is not None:
axes[r, c].plot(
markers[r][c][1], markers[r][c][0], 'o', **marker_kwargs)
plt.subplots_adjust(wspace=0, hspace=0, bottom=0, left=0, top=1, right=1)
if save_file is not None:
make_dir_if_not_exists(save_file)
plt.savefig(save_file + '.' + format, dpi=300, bbox_inches='tight')
plt.show()
def plot_1d_frame_array(
ims_list, markers=None, im_kwargs=None, marker_kwargs=None, plot_ims=True, plot_diffs=True,
figsize=None, save_file=None, format='pdf'):
"""Plot list of list of interpolated images output by :func:`interpolate_1d()` in a 2d grid.
Parameters
----------
ims_list : :obj:`list` of :obj:`list`
each inner list element holds an np.ndarray of shape (y_pix, x_pix)
markers : :obj:`list` of :obj:`list` or NoneType, optional
each inner list element holds an array-like object with values (y_pix, x_pix); if None,
markers are not plotted on top of frames
im_kwargs : :obj:`dict` or NoneType, optional
kwargs for `matplotlib.pyplot.imshow()` function (vmin, vmax, cmap, etc)
marker_kwargs : :obj:`dict` or NoneType, optional
kwargs for `matplotlib.pyplot.plot()` function (markersize, markeredgewidth, etc)
plot_ims : :obj:`bool`, optional
plot images
plot_diffs : :obj:`bool`, optional
plot differences
figsize : :obj:`tuple`, optional
(width, height) in inches
save_file : :obj:`str` or NoneType, optional
figure saved if not None
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
"""
if not (plot_ims or plot_diffs):
raise ValueError('Must plot at least one of ims or diffs')
if plot_ims and plot_diffs:
n_y = len(ims_list) * 2
offset = 2
else:
n_y = len(ims_list)
offset = 1
n_x = len(ims_list[0])
if figsize is None:
y_pix, x_pix = ims_list[0][0].shape
# how many inches per pixel?
in_per_pix = 15 / (x_pix * n_x)
figsize = (15, in_per_pix * y_pix * n_y)
fig, axes = plt.subplots(n_y, n_x, figsize=figsize)
if im_kwargs is None:
im_kwargs = {'vmin': 0, 'vmax': 1, 'cmap': 'gray'}
if marker_kwargs is None:
marker_kwargs = {'markersize': 20, 'markeredgewidth': 3}
for r, ims_list_y in enumerate(ims_list):
base_im = ims_list_y[0]
for c, im in enumerate(ims_list_y):
# plot original images
if plot_ims:
axes[offset * r, c].imshow(im, **im_kwargs)
axes[offset * r, c].set_xticks([])
axes[offset * r, c].set_yticks([])
if markers is not None:
axes[offset * r, c].plot(
markers[r][c][1], markers[r][c][0], 'o', **marker_kwargs)
# plot differences
if plot_diffs and plot_ims:
axes[offset * r + 1, c].imshow(0.5 + (im - base_im), **im_kwargs)
axes[offset * r + 1, c].set_xticks([])
axes[offset * r + 1, c].set_yticks([])
elif plot_diffs:
axes[offset * r, c].imshow(0.5 + (im - base_im), **im_kwargs)
axes[offset * r, c].set_xticks([])
axes[offset * r, c].set_yticks([])
plt.subplots_adjust(wspace=0, hspace=0, bottom=0, left=0, top=1, right=1)
if save_file is not None:
make_dir_if_not_exists(save_file)
plt.savefig(save_file + '.' + format, dpi=300, bbox_inches='tight')
plt.show()
def make_interpolated(
ims, save_file, markers=None, text=None, text_title=None, text_color=[1, 1, 1],
frame_rate=20, scale=3, markersize=10, markeredgecolor='w', markeredgewidth=1, ax=None):
"""Make a latent space interpolation movie.
Parameters
----------
ims : :obj:`list` of :obj:`np.ndarray`
each list element is an array of shape (y_pix, x_pix)
save_file : :obj:`str`
absolute path of save file; does not need file extension, will automatically be saved as
mp4. To save as a gif, include the '.gif' file extension in `save_file`. The movie will
only be saved if `ax` is `NoneType`; else the list of animated frames is returned
markers : :obj:`array-like`, optional
array of size (n_frames, 2) which specifies the (x, y) coordinates of a marker on each
frame
text : :obj:`array-like`, optional
array of size (n_frames) which specifies text printed in the lower left corner of each
frame
text_title : :obj:`array-like`, optional
array of size (n_frames) which specifies text printed in the upper left corner of each
frame
text_color : :obj:`array-like`, optional
rgb array specifying color of `text` and `text_title`, if applicable
frame_rate : :obj:`float`, optional
frame rate of saved movie
scale : :obj:`float`, optional
width of panel is (scale / 2) inches
markersize : :obj:`float`, optional
size of marker if `markers` is not `NoneType`
markeredgecolor : :obj:`float`, optional
color of marker edge if `markers` is not `NoneType`
markeredgewidth : :obj:`float`, optional
width of marker edge if `markers` is not `NoneType`
ax : :obj:`matplotlib.axes.Axes` object
optional axis in which to plot the frames; if this argument is not `NoneType` the list of
animated frames is returned and the movie is not saved
Returns
-------
:obj:`list`
        list of lists of animated frames if `ax` is not `NoneType`; otherwise the movie is saved
"""
y_pix, x_pix = ims[0].shape
if ax is None:
fig_width = scale / 2
fig_height = y_pix / x_pix * scale / 2
fig = plt.figure(figsize=(fig_width, fig_height), dpi=300)
ax = plt.gca()
return_ims = False
else:
return_ims = True
ax.set_xticks([])
ax.set_yticks([])
default_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
txt_kwargs = {
'fontsize': 4, 'color': text_color, 'fontname': 'monospace',
'horizontalalignment': 'left', 'verticalalignment': 'center',
'transform': ax.transAxes}
# ims is a list of lists, each row is a list of artists to draw in the current frame; here we
# are just animating one artist, the image, in each frame
ims_ani = []
for i, im in enumerate(ims):
im_tmp = []
im_tmp.append(ax.imshow(im, **default_kwargs))
# [s.set_visible(False) for s in ax.spines.values()]
if markers is not None:
im_tmp.append(ax.plot(
markers[i, 0], markers[i, 1], '.r', markersize=markersize,
markeredgecolor=markeredgecolor, markeredgewidth=markeredgewidth)[0])
if text is not None:
im_tmp.append(ax.text(0.02, 0.06, text[i], **txt_kwargs))
if text_title is not None:
im_tmp.append(ax.text(0.02, 0.92, text_title[i], **txt_kwargs))
ims_ani.append(im_tmp)
if return_ims:
return ims_ani
else:
plt.tight_layout(pad=0)
ani = animation.ArtistAnimation(fig, ims_ani, blit=True, repeat_delay=1000)
save_movie(save_file, ani, frame_rate=frame_rate)
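# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): `make_interpolated`
# builds an ArtistAnimation from a list of per-frame artist lists (here a
# single image artist per frame). The helper below shows that pattern on
# synthetic frames; the name `_example_artist_animation` and the output path
# are assumptions for illustration.
def _example_artist_animation(save_file='/tmp/example_traversal'):
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.animation as animation
    frames = [np.random.rand(64, 64) for _ in range(30)]   # synthetic frames
    fig, ax = plt.subplots(figsize=(2, 2), dpi=150)
    ax.set_xticks([])
    ax.set_yticks([])
    artists = [[ax.imshow(f, animated=True, cmap='gray', vmin=0, vmax=1)]
               for f in frames]                             # one artist list per frame
    ani = animation.ArtistAnimation(fig, artists, blit=True, repeat_delay=1000)
    save_movie(save_file, ani, frame_rate=20)               # helper imported above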
def make_interpolated_multipanel(
ims, save_file, markers=None, text=None, text_title=None, frame_rate=20, n_cols=3, scale=1,
**kwargs):
"""Make a multi-panel latent space interpolation movie.
Parameters
----------
ims : :obj:`list` of :obj:`list` of :obj:`np.ndarray`
        each list element is used for a single panel, and is another list that contains arrays
of shape (y_pix, x_pix)
save_file : :obj:`str`
absolute path of save file; does not need file extension, will automatically be saved as
mp4. To save as a gif, include the '.gif' file extension in `save_file`.
markers : :obj:`list` of :obj:`array-like`, optional
each list element is used for a single panel, and is an array of size (n_frames, 2)
which specifies the (x, y) coordinates of a marker on each frame for that panel
text : :obj:`list` of :obj:`array-like`, optional
each list element is used for a single panel, and is an array of size (n_frames) which
specifies text printed in the lower left corner of each frame for that panel
text_title : :obj:`list` of :obj:`array-like`, optional
each list element is used for a single panel, and is an array of size (n_frames) which
specifies text printed in the upper left corner of each frame for that panel
frame_rate : :obj:`float`, optional
frame rate of saved movie
n_cols : :obj:`int`, optional
movie is `n_cols` panels wide
scale : :obj:`float`, optional
width of panel is (scale / 2) inches
kwargs
arguments are additional arguments to :func:`make_interpolated`, like 'markersize',
'markeredgewidth', 'markeredgecolor', etc.
"""
n_panels = len(ims)
markers = [None] * n_panels if markers is None else markers
text = [None] * n_panels if text is None else text
y_pix, x_pix = ims[0][0].shape
n_rows = int(np.ceil(n_panels / n_cols))
fig_width = scale / 2 * n_cols
fig_height = y_pix / x_pix * scale / 2 * n_rows
fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_width, fig_height), dpi=300)
plt.subplots_adjust(wspace=0, hspace=0, left=0, bottom=0, right=1, top=1)
# fill out empty panels with black frames
while len(ims) < n_rows * n_cols:
        # each panel is a list of frames, so pad with a panel of black frames
        ims.append([np.zeros_like(im) for im in ims[0]])
markers.append(None)
text.append(None)
# ims is a list of lists, each row is a list of artists to draw in the current frame; here we
# are just animating one artist, the image, in each frame
ims_ani = []
for i, (ims_curr, markers_curr, text_curr) in enumerate(zip(ims, markers, text)):
col = i % n_cols
row = int(np.floor(i / n_cols))
if i == 0:
text_title_str = text_title
else:
text_title_str = None
if n_rows == 1:
ax = axes[col]
elif n_cols == 1:
ax = axes[row]
else:
ax = axes[row, col]
ims_ani_curr = make_interpolated(
ims=ims_curr, markers=markers_curr, text=text_curr, text_title=text_title_str, ax=ax,
save_file=None, **kwargs)
ims_ani.append(ims_ani_curr)
# turn off other axes
i += 1
while i < n_rows * n_cols:
col = i % n_cols
row = int(np.floor(i / n_cols))
axes[row, col].set_axis_off()
i += 1
# rearrange ims:
# currently a list of length n_panels, each element of which is a list of length n_t
# we need a list of length n_t, each element of which is a list of length n_panels
n_frames = len(ims_ani[0])
ims_final = [[] for _ in range(n_frames)]
for i in range(n_frames):
for j in range(n_panels):
ims_final[i] += ims_ani[j][i]
ani = animation.ArtistAnimation(fig, ims_final, blit=True, repeat_delay=1000)
save_movie(save_file, ani, frame_rate=frame_rate)
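# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the final step of
# `make_interpolated_multipanel` regroups artists from panel-major order
# (n_panels lists of n_frames) into frame-major order (n_frames lists holding
# every artist drawn in that frame). The toy input is an assumption for
# illustration.
def _example_regroup_artists(ims_ani):
    """`ims_ani[j][i]` is the list of artists for panel j at frame i."""
    n_frames = len(ims_ani[0])
    ims_final = [[] for _ in range(n_frames)]
    for i in range(n_frames):
        for j in range(len(ims_ani)):
            ims_final[i] += ims_ani[j][i]   # concatenate artist lists across panels
    return ims_final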
# ----------------------------------------
# high-level plotting functions
# ----------------------------------------
def _get_psvae_hparams(**kwargs):
hparams = {
'data_dir': get_user_dir('data'),
'save_dir': get_user_dir('save'),
'model_class': 'ps-vae',
'model_type': 'conv',
'rng_seed_data': 0,
'trial_splits': '8;1;1;0',
'train_frac': 1.0,
'rng_seed_model': 0,
'fit_sess_io_layers': False,
'learning_rate': 1e-4,
'l2_reg': 0,
'conditional_encoder': False,
'vae.beta': 1}
# update hparams
for key, val in kwargs.items():
if key == 'alpha' or key == 'beta' or key == 'gamma':
hparams['ps_vae.%s' % key] = val
else:
hparams[key] = val
return hparams
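# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): `_get_psvae_hparams`
# maps the bare keyword names 'alpha', 'beta', and 'gamma' onto the
# 'ps_vae.*' hyperparameter keys while passing everything else through.
# The experiment name and values below are assumptions for illustration.
def _example_psvae_hparams():
    hparams = _get_psvae_hparams(
        experiment_name='ps-vae-sweep',   # hypothetical experiment name
        alpha=1000, beta=5, gamma=500,
        n_ae_latents=6)
    # hparams now contains 'ps_vae.alpha', 'ps_vae.beta', and 'ps_vae.gamma'
    # alongside the defaults defined above.
    return hparams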
def plot_psvae_training_curves(
lab, expt, animal, session, alphas, betas, gammas, n_ae_latents, rng_seeds_model,
experiment_name, n_labels, dtype='val', save_file=None, format='pdf', **kwargs):
"""Create training plots for each term in the ps-vae objective function.
The `dtype` argument controls which type of trials are plotted ('train' or 'val').
Additionally, multiple models can be plotted simultaneously by varying one (and only one) of
the following parameters:
- alpha
- beta
- gamma
- number of unsupervised latents
- random seed used to initialize model weights
Each of these entries must be an array of length 1 except for one option, which can be an array
of arbitrary length (corresponding to already trained models). This function generates a single
plot with panels for each of the following terms:
- total loss
- pixel mse
- label R^2 (note the objective function contains the label MSE, but R^2 is easier to parse)
- KL divergence of supervised latents
- index-code mutual information of unsupervised latents
- total correlation of unsupervised latents
- dimension-wise KL of unsupervised latents
- subspace overlap
Parameters
----------
lab : :obj:`str`
lab id
expt : :obj:`str`
expt id
animal : :obj:`str`
animal id
session : :obj:`str`
session id
alphas : :obj:`array-like`
alpha values to plot
betas : :obj:`array-like`
beta values to plot
gammas : :obj:`array-like`
gamma values to plot
n_ae_latents : :obj:`array-like`
unsupervised dimensionalities to plot
rng_seeds_model : :obj:`array-like`
model seeds to plot
experiment_name : :obj:`str`
test-tube experiment name
n_labels : :obj:`int`
dimensionality of supervised latent space
dtype : :obj:`str`
'train' | 'val'
save_file : :obj:`str`, optional
absolute path of save file; does not need file extension
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
kwargs
arguments are keys of `hparams`, for example to set `train_frac`, `rng_seed_model`, etc.
"""
# check for arrays, turn ints into lists
n_arrays = 0
hue = None
if len(alphas) > 1:
n_arrays += 1
hue = 'alpha'
if len(betas) > 1:
n_arrays += 1
hue = 'beta'
if len(gammas) > 1:
n_arrays += 1
hue = 'gamma'
if len(n_ae_latents) > 1:
n_arrays += 1
hue = 'n latents'
if len(rng_seeds_model) > 1:
n_arrays += 1
hue = 'rng seed'
if n_arrays > 1:
raise ValueError(
'Can only set one of "alphas", "betas", "gammas", "n_ae_latents", or ' +
'"rng_seeds_model" as an array')
# set model info
hparams = _get_psvae_hparams(experiment_name=experiment_name, **kwargs)
metrics_list = [
'loss', 'loss_data_mse', 'label_r2',
'loss_zs_kl', 'loss_zu_mi', 'loss_zu_tc', 'loss_zu_dwkl', 'loss_AB_orth']
metrics_dfs = []
i = 0
for alpha in alphas:
for beta in betas:
for gamma in gammas:
for n_latents in n_ae_latents:
for rng in rng_seeds_model:
# update hparams
hparams['ps_vae.alpha'] = alpha
hparams['ps_vae.beta'] = beta
hparams['ps_vae.gamma'] = gamma
hparams['n_ae_latents'] = n_latents + n_labels
hparams['rng_seed_model'] = rng
try:
get_lab_example(hparams, lab, expt)
hparams['animal'] = animal
hparams['session'] = session
hparams['session_dir'], sess_ids = get_session_dir(hparams)
hparams['expt_dir'] = get_expt_dir(hparams)
_, version = experiment_exists(hparams, which_version=True)
print(
'loading results with alpha=%i, beta=%i, gamma=%i (version %i)' %
(alpha, beta, gamma, version))
metrics_dfs.append(load_metrics_csv_as_df(
hparams, lab, expt, metrics_list, version=None))
metrics_dfs[i]['alpha'] = alpha
metrics_dfs[i]['beta'] = beta
metrics_dfs[i]['gamma'] = gamma
metrics_dfs[i]['n latents'] = hparams['n_ae_latents']
metrics_dfs[i]['rng seed'] = rng
i += 1
except TypeError:
print(
'could not find model for alpha=%i, beta=%i, gamma=%i' %
(alpha, beta, gamma))
continue
metrics_df = pd.concat(metrics_dfs, sort=False)
sns.set_style('white')
sns.set_context('talk')
data_queried = metrics_df[
(metrics_df.epoch > 10) & ~pd.isna(metrics_df.val) & (metrics_df.dtype == dtype)]
g = sns.FacetGrid(
data_queried, col='loss', col_wrap=3, hue=hue, sharey=False, height=4)
g = g.map(plt.plot, 'epoch', 'val').add_legend() # , color=".3", fit_reg=False, x_jitter=.1);
if save_file is not None:
make_dir_if_not_exists(save_file)
g.savefig(save_file + '.' + format, dpi=300, format=format)
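# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): when calling
# `plot_psvae_training_curves`, exactly one of the hyperparameter arrays may
# have length > 1 (here, alphas); all ids, names, and paths below are
# assumptions for illustration only.
def _example_training_curves():
    plot_psvae_training_curves(
        lab='lab-id', expt='expt-id', animal='animal-id', session='session-id',
        alphas=[50, 100, 500, 1000],       # the single swept hyperparameter
        betas=[1], gammas=[0],
        n_ae_latents=[4], rng_seeds_model=[0],
        experiment_name='ps-vae-alpha-sweep',
        n_labels=4, dtype='val',
        save_file='/tmp/psvae_training_curves')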
def plot_hyperparameter_search_results(
lab, expt, animal, session, n_labels, label_names, alpha_weights, alpha_n_ae_latents,
alpha_expt_name, beta_weights, gamma_weights, beta_gamma_n_ae_latents,
beta_gamma_expt_name, alpha, beta, gamma, save_file, batch_size=None, format='pdf',
**kwargs):
"""Create a variety of diagnostic plots to assess the ps-vae hyperparameters.
These diagnostic plots are based on the recommended way to perform a hyperparameter search in
the ps-vae models; first, fix beta=1 and gamma=0, and do a sweep over alpha values and number
of latents (for example alpha=[50, 100, 500, 1000] and n_ae_latents=[2, 4, 8, 16]). The best
alpha value is subjective because it involves a tradeoff between pixel mse and label mse. After
choosing a suitable value, fix alpha and the number of latents and vary beta and gamma. This
function will then plot the following panels:
- pixel mse as a function of alpha/num latents (for fixed beta/gamma)
- label mse as a function of alpha/num_latents (for fixed beta/gamma)
- pixel mse as a function of beta/gamma (for fixed alpha/n_ae_latents)
- label mse as a function of beta/gamma (for fixed alpha/n_ae_latents)
- index-code mutual information (part of the KL decomposition) as a function of beta/gamma (for
fixed alpha/n_ae_latents)
    - total correlation (part of the KL decomposition) as a function of beta/gamma (for fixed
alpha/n_ae_latents)
- dimension-wise KL (part of the KL decomposition) as a function of beta/gamma (for fixed
alpha/n_ae_latents)
- average correlation coefficient across all pairs of unsupervised latent dims as a function of
beta/gamma (for fixed alpha/n_ae_latents)
    - subspace overlap computed as ||[A; B][A; B]^T - I||_2^2 for A, B the projections to the
      supervised and unsupervised subspaces, respectively, and I the identity - as a function of
      beta/gamma (for fixed alpha/n_ae_latents)
- example subspace overlap matrix for gamma=0 and beta=1, with fixed alpha/n_ae_latents
- example subspace overlap matrix for gamma=1000 and beta=1, with fixed alpha/n_ae_latents
Parameters
----------
lab : :obj:`str`
lab id
expt : :obj:`str`
expt id
animal : :obj:`str`
animal id
session : :obj:`str`
session id
n_labels : :obj:`str`
number of label dims
label_names : :obj:`array-like`
names of label dims
alpha_weights : :obj:`array-like`
array of alpha weights for fixed values of beta, gamma
alpha_n_ae_latents : :obj:`array-like`
array of latent dimensionalities for fixed values of beta, gamma using alpha_weights
alpha_expt_name : :obj:`str`
test-tube experiment name of alpha-based hyperparam search
beta_weights : :obj:`array-like`
array of beta weights for a fixed value of alpha
gamma_weights : :obj:`array-like`
        array of gamma weights for a fixed value of alpha
beta_gamma_n_ae_latents : :obj:`int`
latent dimensionality used for beta-gamma hyperparam search
beta_gamma_expt_name : :obj:`str`
test-tube experiment name of beta-gamma hyperparam search
alpha : :obj:`float`
fixed value of alpha for beta-gamma search
beta : :obj:`float`
fixed value of beta for alpha search
gamma : :obj:`float`
fixed value of gamma for alpha search
save_file : :obj:`str`
absolute path of save file; does not need file extension
batch_size : :obj:`int`, optional
size of batches, used to compute correlation coefficient per batch; if NoneType, the
correlation coefficient is computed across all time points
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
kwargs
arguments are keys of `hparams`, preceded by either `alpha_` or `beta_gamma_`. For example,
to set the train frac of the alpha models, use `alpha_train_frac`; to set the rng_data_seed
of the beta-gamma models, use `beta_gamma_rng_data_seed`.
"""
def apply_masks(data, masks):
return data[masks == 1]
def get_label_r2(hparams, model, data_generator, version, dtype='val', overwrite=False):
from sklearn.metrics import r2_score
save_file = os.path.join(
hparams['expt_dir'], 'version_%i' % version, 'r2_supervised.csv')
if not os.path.exists(save_file) or overwrite:
if not os.path.exists(save_file):
print('R^2 metrics do not exist; computing from scratch')
else:
print('overwriting metrics at %s' % save_file)
metrics_df = []
data_generator.reset_iterators(dtype)
for i_test in tqdm(range(data_generator.n_tot_batches[dtype])):
# get next minibatch and put it on the device
data, sess = data_generator.next_batch(dtype)
x = data['images'][0]
y = data['labels'][0].cpu().detach().numpy()
if 'labels_masks' in data:
n = data['labels_masks'][0].cpu().detach().numpy()
else:
n = np.ones_like(y)
z = model.get_transformed_latents(x, dataset=sess)
for i in range(n_labels):
y_true = apply_masks(y[:, i], n[:, i])
y_pred = apply_masks(z[:, i], n[:, i])
if len(y_true) > 10:
r2 = r2_score(y_true, y_pred, multioutput='variance_weighted')
mse = np.mean(np.square(y_true - y_pred))
else:
r2 = np.nan
mse = np.nan
metrics_df.append(pd.DataFrame({
'Trial': data['batch_idx'].item(),
'Label': label_names[i],
'R2': r2,
'MSE': mse,
'Model': 'PS-VAE'}, index=[0]))
metrics_df = pd.concat(metrics_df)
print('saving results to %s' % save_file)
metrics_df.to_csv(save_file, index=False, header=True)
else:
print('loading results from %s' % save_file)
metrics_df = pd.read_csv(save_file)
return metrics_df
# -----------------------------------------------------
# load pixel/label MSE as a function of n_latents/alpha
# -----------------------------------------------------
# set model info
hparams = _get_psvae_hparams(experiment_name=alpha_expt_name)
# update hparams
for key, val in kwargs.items():
# hparam vals should be named 'alpha_[property]', for example 'alpha_train_frac'
if key.split('_')[0] == 'alpha':
prop = key[6:]
hparams[prop] = val
else:
hparams[key] = val
metrics_list = ['loss_data_mse']
metrics_dfs_frame = []
metrics_dfs_marker = []
for n_latent in alpha_n_ae_latents:
hparams['n_ae_latents'] = n_latent + n_labels
for alpha_ in alpha_weights:
hparams['ps_vae.alpha'] = alpha_
hparams['ps_vae.beta'] = beta
hparams['ps_vae.gamma'] = gamma
try:
get_lab_example(hparams, lab, expt)
hparams['animal'] = animal
hparams['session'] = session
hparams['session_dir'], sess_ids = get_session_dir(hparams)
hparams['expt_dir'] = get_expt_dir(hparams)
_, version = experiment_exists(hparams, which_version=True)
print('loading results with alpha=%i, beta=%i, gamma=%i (version %i)' % (
hparams['ps_vae.alpha'], hparams['ps_vae.beta'], hparams['ps_vae.gamma'],
version))
# get frame mse
metrics_dfs_frame.append(load_metrics_csv_as_df(
hparams, lab, expt, metrics_list, version=None, test=True))
metrics_dfs_frame[-1]['alpha'] = alpha_
metrics_dfs_frame[-1]['n_latents'] = hparams['n_ae_latents']
# get marker mse
model, data_gen = get_best_model_and_data(
hparams, Model=None, load_data=True, version=version)
metrics_df_ = get_label_r2(hparams, model, data_gen, version, dtype='val')
metrics_df_['alpha'] = alpha_
metrics_df_['n_latents'] = hparams['n_ae_latents']
metrics_dfs_marker.append(metrics_df_[metrics_df_.Model == 'PS-VAE'])
except TypeError:
print('could not find model for alpha=%i, beta=%i, gamma=%i' % (
hparams['ps_vae.alpha'], hparams['ps_vae.beta'], hparams['ps_vae.gamma']))
continue
metrics_df_frame = pd.concat(metrics_dfs_frame, sort=False)
metrics_df_marker = pd.concat(metrics_dfs_marker, sort=False)
print('done')
# -----------------------------------------------------
# load pixel/label MSE as a function of beta/gamma
# -----------------------------------------------------
# update hparams
hparams['experiment_name'] = beta_gamma_expt_name
for key, val in kwargs.items():
        # hparam vals should be named 'beta_gamma_[property]', for example 'beta_gamma_train_frac'
if key.split('_')[0] == 'beta' and key.split('_')[1] == 'gamma':
prop = key[11:]
hparams[prop] = val
metrics_list = ['loss_data_mse', 'loss_zu_mi', 'loss_zu_tc', 'loss_zu_dwkl', 'loss_AB_orth']
metrics_dfs_frame_bg = []
metrics_dfs_marker_bg = []
metrics_dfs_corr_bg = []
overlaps = {}
for beta in beta_weights:
for gamma in gamma_weights:
hparams['n_ae_latents'] = beta_gamma_n_ae_latents + n_labels
hparams['ps_vae.alpha'] = alpha
hparams['ps_vae.beta'] = beta
hparams['ps_vae.gamma'] = gamma
try:
get_lab_example(hparams, lab, expt)
hparams['animal'] = animal
hparams['session'] = session
hparams['session_dir'], sess_ids = get_session_dir(hparams)
hparams['expt_dir'] = get_expt_dir(hparams)
_, version = experiment_exists(hparams, which_version=True)
print('loading results with alpha=%i, beta=%i, gamma=%i (version %i)' % (
hparams['ps_vae.alpha'], hparams['ps_vae.beta'], hparams['ps_vae.gamma'],
version))
# get frame mse
metrics_dfs_frame_bg.append(load_metrics_csv_as_df(
hparams, lab, expt, metrics_list, version=None, test=True))
metrics_dfs_frame_bg[-1]['beta'] = beta
metrics_dfs_frame_bg[-1]['gamma'] = gamma
# get marker mse
model, data_gen = get_best_model_and_data(
hparams, Model=None, load_data=True, version=version)
metrics_df_ = get_label_r2(hparams, model, data_gen, version, dtype='val')
metrics_df_['beta'] = beta
metrics_df_['gamma'] = gamma
metrics_dfs_marker_bg.append(metrics_df_[metrics_df_.Model == 'PS-VAE'])
# get subspace overlap
A = model.encoding.A.weight.data.cpu().detach().numpy()
B = model.encoding.B.weight.data.cpu().detach().numpy()
C = np.concatenate([A, B], axis=0)
overlap = np.matmul(C, C.T)
overlaps['beta=%i_gamma=%i' % (beta, gamma)] = overlap
# get corr
latents = load_latents(hparams, version, dtype='test')
if batch_size is None:
corr = np.corrcoef(latents[:, n_labels + np.array([0, 1])].T)
metrics_dfs_corr_bg.append(pd.DataFrame({
'loss': 'corr',
'dtype': 'test',
'val': np.abs(corr[0, 1]),
'beta': beta,
'gamma': gamma}, index=[0]))
else:
n_batches = int(np.ceil(latents.shape[0] / batch_size))
for i in range(n_batches):
corr = np.corrcoef(
latents[i * batch_size:(i + 1) * batch_size,
n_labels + np.array([0, 1])].T)
metrics_dfs_corr_bg.append(pd.DataFrame({
'loss': 'corr',
'dtype': 'test',
'val': np.abs(corr[0, 1]),
'beta': beta,
'gamma': gamma}, index=[0]))
except TypeError:
print('could not find model for alpha=%i, beta=%i, gamma=%i' % (
hparams['ps_vae.alpha'], hparams['ps_vae.beta'], hparams['ps_vae.gamma']))
continue
print()
metrics_df_frame_bg = pd.concat(metrics_dfs_frame_bg, sort=False)
metrics_df_marker_bg = pd.concat(metrics_dfs_marker_bg, sort=False)
metrics_df_corr_bg = pd.concat(metrics_dfs_corr_bg, sort=False)
print('done')
# -----------------------------------------------------
# ----------------- PLOT DATA -------------------------
# -----------------------------------------------------
sns.set_style('white')
sns.set_context('paper', font_scale=1.2)
alpha_palette = sns.color_palette('Greens')
beta_palette = sns.color_palette('Reds', len(metrics_df_corr_bg.beta.unique()))
gamma_palette = sns.color_palette('Blues', len(metrics_df_corr_bg.gamma.unique()))
from matplotlib.gridspec import GridSpec
fig = plt.figure(figsize=(12, 10), dpi=300)
n_rows = 3
n_cols = 12
gs = GridSpec(n_rows, n_cols, figure=fig)
def despine(ax):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
sns.set_palette(alpha_palette)
# --------------------------------------------------
# MSE per pixel
# --------------------------------------------------
ax_pixel_mse_alpha = fig.add_subplot(gs[0, 0:3])
data_queried = metrics_df_frame[(metrics_df_frame.dtype == 'test')]
sns.barplot(x='n_latents', y='val', hue='alpha', data=data_queried, ax=ax_pixel_mse_alpha)
ax_pixel_mse_alpha.legend().set_visible(False)
ax_pixel_mse_alpha.set_xlabel('Latent dimension')
ax_pixel_mse_alpha.set_ylabel('MSE per pixel')
ax_pixel_mse_alpha.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3))
ax_pixel_mse_alpha.set_title('Beta=1, Gamma=0')
despine(ax_pixel_mse_alpha)
# --------------------------------------------------
# MSE per marker
# --------------------------------------------------
ax_marker_mse_alpha = fig.add_subplot(gs[0, 3:6])
data_queried = metrics_df_marker
sns.barplot(x='n_latents', y='MSE', hue='alpha', data=data_queried, ax=ax_marker_mse_alpha)
ax_marker_mse_alpha.set_xlabel('Latent dimension')
ax_marker_mse_alpha.set_ylabel('MSE per marker')
ax_marker_mse_alpha.set_title('Beta=1, Gamma=0')
ax_marker_mse_alpha.legend(frameon=True, title='Alpha')
despine(ax_marker_mse_alpha)
sns.set_palette(gamma_palette)
# --------------------------------------------------
# MSE per pixel (beta/gamma)
# --------------------------------------------------
ax_pixel_mse_bg = fig.add_subplot(gs[0, 6:9])
data_queried = metrics_df_frame_bg[
(metrics_df_frame_bg.dtype == 'test') &
(metrics_df_frame_bg.loss == 'loss_data_mse') &
(metrics_df_frame_bg.epoch == 200)]
sns.barplot(x='beta', y='val', hue='gamma', data=data_queried, ax=ax_pixel_mse_bg)
ax_pixel_mse_bg.legend().set_visible(False)
ax_pixel_mse_bg.set_xlabel('Beta')
ax_pixel_mse_bg.set_ylabel('MSE per pixel')
ax_pixel_mse_bg.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3))
ax_pixel_mse_bg.set_title('Latents=%i, Alpha=1000' % hparams['n_ae_latents'])
despine(ax_pixel_mse_bg)
# --------------------------------------------------
# MSE per marker (beta/gamma)
# --------------------------------------------------
ax_marker_mse_bg = fig.add_subplot(gs[0, 9:12])
data_queried = metrics_df_marker_bg
sns.barplot(x='beta', y='MSE', hue='gamma', data=data_queried, ax=ax_marker_mse_bg)
ax_marker_mse_bg.set_xlabel('Beta')
ax_marker_mse_bg.set_ylabel('MSE per marker')
ax_marker_mse_bg.set_title('Latents=%i, Alpha=1000' % hparams['n_ae_latents'])
ax_marker_mse_bg.legend(frameon=True, title='Gamma', loc='lower left')
despine(ax_marker_mse_bg)
# --------------------------------------------------
# ICMI
# --------------------------------------------------
ax_icmi = fig.add_subplot(gs[1, 0:4])
data_queried = metrics_df_frame_bg[
(metrics_df_frame_bg.dtype == 'test') &
(metrics_df_frame_bg.loss == 'loss_zu_mi') &
(metrics_df_frame_bg.epoch == 200)]
sns.lineplot(
x='beta', y='val', hue='gamma', data=data_queried, ax=ax_icmi, ci=None,
palette=gamma_palette)
ax_icmi.legend().set_visible(False)
ax_icmi.set_xlabel('Beta')
ax_icmi.set_ylabel('Index-code Mutual Information')
ax_icmi.set_title('Latents=%i, Alpha=1000' % hparams['n_ae_latents'])
despine(ax_icmi)
# --------------------------------------------------
# TC
# --------------------------------------------------
ax_tc = fig.add_subplot(gs[1, 4:8])
data_queried = metrics_df_frame_bg[
(metrics_df_frame_bg.dtype == 'test') &
(metrics_df_frame_bg.loss == 'loss_zu_tc') &
(metrics_df_frame_bg.epoch == 200)]
sns.lineplot(
x='beta', y='val', hue='gamma', data=data_queried, ax=ax_tc, ci=None,
palette=gamma_palette)
ax_tc.legend().set_visible(False)
ax_tc.set_xlabel('Beta')
ax_tc.set_ylabel('Total Correlation')
ax_tc.set_title('Latents=%i, Alpha=1000' % hparams['n_ae_latents'])
despine(ax_tc)
# --------------------------------------------------
# DWKL
# --------------------------------------------------
ax_dwkl = fig.add_subplot(gs[1, 8:12])
data_queried = metrics_df_frame_bg[
(metrics_df_frame_bg.dtype == 'test') &
(metrics_df_frame_bg.loss == 'loss_zu_dwkl') &
(metrics_df_frame_bg.epoch == 200)]
sns.lineplot(
x='beta', y='val', hue='gamma', data=data_queried, ax=ax_dwkl, ci=None,
palette=gamma_palette)
ax_dwkl.legend().set_visible(False)
ax_dwkl.set_xlabel('Beta')
ax_dwkl.set_ylabel('Dimension-wise KL')
ax_dwkl.set_title('Latents=%i, Alpha=1000' % hparams['n_ae_latents'])
despine(ax_dwkl)
# --------------------------------------------------
# CC
# --------------------------------------------------
ax_cc = fig.add_subplot(gs[2, 0:3])
data_queried = metrics_df_corr_bg
sns.lineplot(
x='beta', y='val', hue='gamma', data=data_queried, ax=ax_cc, ci=None,
palette=gamma_palette)
ax_cc.legend().set_visible(False)
ax_cc.set_xlabel('Beta')
ax_cc.set_ylabel('Correlation Coefficient')
ax_cc.set_title('Latents=%i, Alpha=1000' % hparams['n_ae_latents'])
despine(ax_cc)
# --------------------------------------------------
# AB orth
# --------------------------------------------------
ax_orth = fig.add_subplot(gs[2, 3:6])
data_queried = metrics_df_frame_bg[
(metrics_df_frame_bg.dtype == 'test') &
(metrics_df_frame_bg.loss == 'loss_AB_orth') &
(metrics_df_frame_bg.epoch == 200) &
~metrics_df_frame_bg.val.isna()]
sns.lineplot(
x='gamma', y='val', hue='beta', data=data_queried, ax=ax_orth, ci=None,
palette=beta_palette)
ax_orth.legend(frameon=False, title='Beta')
ax_orth.set_xlabel('Gamma')
ax_orth.set_ylabel('Subspace overlap')
ax_orth.set_title('Latents=%i, Alpha=1000' % hparams['n_ae_latents'])
despine(ax_orth)
# --------------------------------------------------
# Gamma = 0 overlap
# --------------------------------------------------
ax_gamma0 = fig.add_subplot(gs[2, 6:9])
overlap = overlaps['beta=%i_gamma=%i' % (np.min(beta_weights), np.min(gamma_weights))]
im = ax_gamma0.imshow(overlap, cmap='PuOr', vmin=-1, vmax=1)
ax_gamma0.set_xticks(np.arange(overlap.shape[1]))
ax_gamma0.set_yticks(np.arange(overlap.shape[0]))
ax_gamma0.set_title('Subspace overlap\nGamma=%i' % np.min(gamma_weights))
fig.colorbar(im, ax=ax_gamma0, orientation='vertical', shrink=0.75)
# --------------------------------------------------
# Gamma = 1000 overlap
# --------------------------------------------------
ax_gamma1 = fig.add_subplot(gs[2, 9:12])
overlap = overlaps['beta=%i_gamma=%i' % (np.min(beta_weights), np.max(gamma_weights))]
im = ax_gamma1.imshow(overlap, cmap='PuOr', vmin=-1, vmax=1)
ax_gamma1.set_xticks(np.arange(overlap.shape[1]))
ax_gamma1.set_yticks(np.arange(overlap.shape[0]))
ax_gamma1.set_title('Subspace overlap\nGamma=%i' % np.max(gamma_weights))
fig.colorbar(im, ax=ax_gamma1, orientation='vertical', shrink=0.75)
plt.tight_layout(h_pad=3) # h_pad is fraction of font size
# reset to default color palette
# sns.set_palette(sns.color_palette(None, 10))
sns.reset_orig()
if save_file is not None:
make_dir_if_not_exists(save_file)
plt.savefig(save_file + '.' + format, dpi=300, format=format)
def plot_label_reconstructions(
lab, expt, animal, session, n_ae_latents, experiment_name, n_labels, trials, version=None,
plot_scale=0.5, sess_idx=0, save_file=None, format='pdf', xtick_locs=None, frame_rate=None,
max_traces=8, add_r2=True, add_legend=True, colored_predictions=True, concat_trials=False,
**kwargs):
"""Plot labels and their reconstructions from an ps-vae.
Parameters
----------
lab : :obj:`str`
lab id
expt : :obj:`str`
expt id
animal : :obj:`str`
animal id
session : :obj:`str`
session id
    n_ae_latents : :obj:`int`
dimensionality of unsupervised latent space; n_labels will be added to this
experiment_name : :obj:`str`
test-tube experiment name
    n_labels : :obj:`int`
dimensionality of supervised latent space
trials : :obj:`array-like`
array of trials to reconstruct
version : :obj:`str` or :obj:`int`, optional
        can be 'best' to load the best model, an integer to load a specific model, or NoneType
        to use the values in hparams to load a specific model
plot_scale : :obj:`float`
scale the magnitude of reconstructions
sess_idx : :obj:`int`, optional
session index into data generator
save_file : :obj:`str`, optional
absolute path of save file; does not need file extension
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
xtick_locs : :obj:`array-like`, optional
tick locations in units of bins
frame_rate : :obj:`float`, optional
        frame rate of behavioral video; to properly relabel xticks
max_traces : :obj:`int`, optional
maximum number of traces to plot, for easier visualization
add_r2 : :obj:`bool`, optional
print R2 value on plot
add_legend : :obj:`bool`, optional
print legend on plot
colored_predictions : :obj:`bool`, optional
color predictions using default seaborn colormap; else predictions are black
concat_trials : :obj:`bool`, optional
True to plot all trials together, separated by a small gap
kwargs
arguments are keys of `hparams`, for example to set `train_frac`, `rng_seed_model`, etc.
"""
from behavenet.plotting.decoder_utils import plot_neural_reconstruction_traces
if len(trials) == 1:
concat_trials = False
# set model info
hparams = _get_psvae_hparams(
experiment_name=experiment_name, n_ae_latents=n_ae_latents + n_labels, **kwargs)
# programmatically fill out other hparams options
get_lab_example(hparams, lab, expt)
hparams['animal'] = animal
hparams['session'] = session
model, data_generator = get_best_model_and_data(
hparams, Model=None, load_data=True, version=version, data_kwargs=None)
print(data_generator)
print('alpha: %i' % model.hparams['ps_vae.alpha'])
print('beta: %i' % model.hparams['ps_vae.beta'])
print('gamma: %i' % model.hparams['ps_vae.gamma'])
print('model seed: %i' % model.hparams['rng_seed_model'])
n_blank = 5 # buffer time points between trials if concatenating
labels_og_all = []
labels_pred_all = []
for trial in trials:
# collect data
batch = data_generator.datasets[sess_idx][trial]
labels_og = batch['labels'].detach().cpu().numpy()
labels_pred = model.get_predicted_labels(batch['images']).detach().cpu().numpy()
if 'labels_masks' in batch:
labels_masks = batch['labels_masks'].detach().cpu().numpy()
labels_og[labels_masks == 0] = np.nan
# store data
labels_og_all.append(labels_og)
labels_pred_all.append(labels_pred)
if trial != trials[-1]:
labels_og_all.append(np.nan * np.zeros((n_blank, labels_og.shape[1])))
labels_pred_all.append(np.nan * np.zeros((n_blank, labels_pred.shape[1])))
# plot data from single trial
if not concat_trials:
if save_file is not None:
save_file_trial = save_file + '_trial-%i' % trial
else:
save_file_trial = None
plot_neural_reconstruction_traces(
labels_og, labels_pred, scale=plot_scale, save_file=save_file_trial, format=format,
xtick_locs=xtick_locs, frame_rate=frame_rate, max_traces=max_traces, add_r2=add_r2,
add_legend=add_legend, colored_predictions=colored_predictions)
# plot data from all trials
if concat_trials:
if save_file is not None:
save_file_trial = save_file + '_trial-{}'.format(trials)
else:
save_file_trial = None
plot_neural_reconstruction_traces(
np.vstack(labels_og_all), np.vstack(labels_pred_all), scale=plot_scale,
save_file=save_file_trial, format=format,
xtick_locs=xtick_locs, frame_rate=frame_rate, max_traces=max_traces, add_r2=add_r2,
add_legend=add_legend, colored_predictions=colored_predictions)
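# A minimal usage sketch for plot_label_reconstructions. The lab/session identifiers,
# experiment name, and save path below are hypothetical placeholders, not values that ship
# with behavenet:
#
#   plot_label_reconstructions(
#       'musall', 'vistrained', 'mSM36', '05-Dec-2017', n_ae_latents=2,
#       experiment_name='ps-vae-example', n_labels=4, trials=[10, 11],
#       save_file='/tmp/label_recons', format='png', add_r2=True)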
def plot_latent_traversals(
lab, expt, animal, session, model_class, alpha, beta, gamma, n_ae_latents, rng_seed_model,
experiment_name, n_labels, label_idxs, label_min_p=5, label_max_p=95,
channel=0, n_frames_zs=4, n_frames_zu=4, trial=None, trial_idx=1, batch_idx=1,
crop_type=None, crop_kwargs=None, sess_idx=0, save_file=None, format='pdf', **kwargs):
"""Plot video frames representing the traversal of individual dimensions of the latent space.
Parameters
----------
lab : :obj:`str`
lab id
expt : :obj:`str`
expt id
animal : :obj:`str`
animal id
session : :obj:`str`
session id
model_class : :obj:`str`
model class in which to perform traversal; currently supported models are:
'ae' | 'vae' | 'cond-ae' | 'cond-vae' | 'beta-tcvae' | 'cond-ae-msp' | 'ps-vae'
note that models with conditional encoders are not currently supported
alpha : :obj:`float`
ps-vae alpha value
beta : :obj:`float`
ps-vae beta value
    gamma : :obj:`float`
ps-vae gamma value
n_ae_latents : :obj:`int`
dimensionality of unsupervised latents
rng_seed_model : :obj:`int`
model seed
experiment_name : :obj:`str`
test-tube experiment name
    n_labels : :obj:`int`
dimensionality of supervised latent space (ignored when using fully unsupervised models)
label_idxs : :obj:`array-like`, optional
set of label indices (dimensions) to individually traverse
label_min_p : :obj:`float`, optional
lower percentile of training data used to compute range of traversal
label_max_p : :obj:`float`, optional
upper percentile of training data used to compute range of traversal
channel : :obj:`int`, optional
image channel to plot
n_frames_zs : :obj:`int`, optional
number of frames (points) to display for traversal through supervised dimensions
n_frames_zu : :obj:`int`, optional
number of frames (points) to display for traversal through unsupervised dimensions
trial : :obj:`int`, optional
trial index into all possible trials (train, val, test); one of `trial` or `trial_idx`
must be specified; `trial` takes precedence over `trial_idx`
trial_idx : :obj:`int`, optional
trial index of base frame used for interpolation
batch_idx : :obj:`int`, optional
batch index of base frame used for interpolation
crop_type : :obj:`str`, optional
cropping method used on interpolated frames
'fixed' | None
crop_kwargs : :obj:`dict`, optional
if crop_type is not None, provides information about the crop
keys for 'fixed' type: 'y_0', 'x_0', 'y_ext', 'x_ext'; window is
(y_0 - y_ext, y_0 + y_ext) in vertical direction and
(x_0 - x_ext, x_0 + x_ext) in horizontal direction
sess_idx : :obj:`int`, optional
session index into data generator
save_file : :obj:`str`, optional
absolute path of save file; does not need file extension
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
kwargs
arguments are keys of `hparams`, for example to set `train_frac`, `rng_seed_model`, etc.
"""
hparams = _get_psvae_hparams(
model_class=model_class, alpha=alpha, beta=beta, gamma=gamma, n_ae_latents=n_ae_latents,
experiment_name=experiment_name, rng_seed_model=rng_seed_model, **kwargs)
if model_class == 'cond-ae-msp' or model_class == 'ps-vae':
hparams['n_ae_latents'] += n_labels
# programmatically fill out other hparams options
get_lab_example(hparams, lab, expt)
hparams['animal'] = animal
hparams['session'] = session
hparams['session_dir'], sess_ids = get_session_dir(hparams)
hparams['expt_dir'] = get_expt_dir(hparams)
_, version = experiment_exists(hparams, which_version=True)
model_ae, data_generator = get_best_model_and_data(hparams, Model=None, version=version)
# get latent/label info
latent_range = get_input_range(
'latents', hparams, model=model_ae, data_gen=data_generator, min_p=15, max_p=85,
version=version)
label_range = get_input_range(
'labels', hparams, sess_ids=sess_ids, sess_idx=sess_idx,
min_p=label_min_p, max_p=label_max_p)
try:
label_sc_range = get_input_range(
'labels_sc', hparams, sess_ids=sess_ids, sess_idx=sess_idx,
min_p=label_min_p, max_p=label_max_p)
except KeyError:
import copy
label_sc_range = copy.deepcopy(label_range)
# ----------------------------------------
# label traversals
# ----------------------------------------
interp_func_label = interpolate_1d
plot_func_label = plot_1d_frame_array
save_file_new = save_file + '_label-traversals'
if model_class == 'cond-ae' or model_class == 'cond-ae-msp' or model_class == 'ps-vae' or \
model_class == 'cond-vae':
# get model input for this trial
ims_pt, ims_np, latents_np, labels_pt, labels_np, labels_2d_pt, labels_2d_np = \
get_model_input(
data_generator, hparams, model_ae, trial_idx=trial_idx, trial=trial,
compute_latents=True, compute_scaled_labels=False, compute_2d_labels=False)
if labels_2d_np is None:
labels_2d_np = np.copy(labels_np)
if crop_type == 'fixed':
crop_kwargs_ = crop_kwargs
else:
crop_kwargs_ = None
# perform interpolation
ims_label, markers_loc_label, ims_crop_label = interp_func_label(
'labels', model_ae, ims_pt[None, batch_idx, :], latents_np[None, batch_idx, :],
labels_np[None, batch_idx, :], labels_2d_np[None, batch_idx, :],
mins=label_range['min'], maxes=label_range['max'],
n_frames=n_frames_zs, input_idxs=label_idxs, crop_type=crop_type,
mins_sc=label_sc_range['min'], maxes_sc=label_sc_range['max'],
crop_kwargs=crop_kwargs_, ch=channel)
# plot interpolation
if crop_type:
marker_kwargs = {
'markersize': 30, 'markeredgewidth': 8, 'markeredgecolor': [1, 1, 0],
'fillstyle': 'none'}
plot_func_label(
ims_crop_label, markers=None, marker_kwargs=marker_kwargs, save_file=save_file_new,
format=format)
else:
marker_kwargs = {
'markersize': 20, 'markeredgewidth': 5, 'markeredgecolor': [1, 1, 0],
'fillstyle': 'none'}
plot_func_label(
ims_label, markers=None, marker_kwargs=marker_kwargs, save_file=save_file_new,
format=format)
# ----------------------------------------
# latent traversals
# ----------------------------------------
interp_func_latent = interpolate_1d
plot_func_latent = plot_1d_frame_array
save_file_new = save_file + '_latent-traversals'
if hparams['model_class'] == 'cond-ae-msp' or hparams['model_class'] == 'ps-vae':
latent_idxs = n_labels + np.arange(n_ae_latents)
elif hparams['model_class'] == 'ae' \
or hparams['model_class'] == 'vae' \
or hparams['model_class'] == 'cond-vae' \
or hparams['model_class'] == 'beta-tcvae':
latent_idxs = np.arange(n_ae_latents)
else:
raise NotImplementedError
# simplify options here
scaled_labels = False
twod_labels = False
crop_type = None
crop_kwargs = None
labels_2d_np_sel = None
# get model input for this trial
ims_pt, ims_np, latents_np, labels_pt, labels_np, labels_2d_pt, labels_2d_np = \
get_model_input(
data_generator, hparams, model_ae, trial=trial, trial_idx=trial_idx,
compute_latents=True, compute_scaled_labels=scaled_labels,
compute_2d_labels=twod_labels)
latents_np[:, n_labels:] = 0
if hparams['model_class'] == 'ae' or hparams['model_class'] == 'beta-tcvae':
labels_np_sel = labels_np
else:
labels_np_sel = labels_np[None, batch_idx, :]
# perform interpolation
ims_latent, markers_loc_latent_, ims_crop_latent = interp_func_latent(
'latents', model_ae, ims_pt[None, batch_idx, :], latents_np[None, batch_idx, :],
labels_np_sel, labels_2d_np_sel,
mins=latent_range['min'], maxes=latent_range['max'],
n_frames=n_frames_zu, input_idxs=latent_idxs, crop_type=crop_type,
mins_sc=None, maxes_sc=None, crop_kwargs=crop_kwargs, ch=channel)
# plot interpolation
marker_kwargs = {
'markersize': 20, 'markeredgewidth': 5, 'markeredgecolor': [1, 1, 0],
'fillstyle': 'none'}
plot_func_latent(
ims_latent, markers=None, marker_kwargs=marker_kwargs, save_file=save_file_new,
format=format)
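# A minimal usage sketch for plot_latent_traversals; all identifiers and hyperparameter
# values below are illustrative and assume a previously fitted PS-VAE exists on disk:
#
#   plot_latent_traversals(
#       'musall', 'vistrained', 'mSM36', '05-Dec-2017', model_class='ps-vae',
#       alpha=1000, beta=5, gamma=500, n_ae_latents=2, rng_seed_model=0,
#       experiment_name='ps-vae-example', n_labels=4, label_idxs=[0, 1],
#       trial_idx=1, batch_idx=1, save_file='/tmp/traversals', format='png')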
def make_latent_traversal_movie(
lab, expt, animal, session, model_class, alpha, beta, gamma, n_ae_latents,
rng_seed_model, experiment_name, n_labels, trial_idxs, batch_idxs, trials,
label_min_p=5, label_max_p=95, channel=0, sess_idx=0, n_frames=10, n_buffer_frames=5,
crop_kwargs=None, n_cols=3, movie_kwargs={}, panel_titles=None, order_idxs=None,
split_movies=False, save_file=None, **kwargs):
"""Create a multi-panel movie with each panel showing traversals of an individual latent dim.
The traversals will start at a lower bound, increase to an upper bound, then return to a lower
bound; the traversal of each dimension occurs simultaneously. It is also possible to specify
multiple base frames for the traversals; the traversal of each base frame is separated by
    several blank frames. Note that plotting markers on top of the corresponding supervised
    dimensions is not supported by this function.
Parameters
----------
lab : :obj:`str`
lab id
expt : :obj:`str`
expt id
animal : :obj:`str`
animal id
session : :obj:`str`
session id
model_class : :obj:`str`
model class in which to perform traversal; currently supported models are:
'ae' | 'vae' | 'cond-ae' | 'cond-vae' | 'ps-vae'
note that models with conditional encoders are not currently supported
alpha : :obj:`float`
ps-vae alpha value
beta : :obj:`float`
ps-vae beta value
    gamma : :obj:`float`
ps-vae gamma value
n_ae_latents : :obj:`int`
dimensionality of unsupervised latents
rng_seed_model : :obj:`int`
model seed
experiment_name : :obj:`str`
test-tube experiment name
    n_labels : :obj:`int`
dimensionality of supervised latent space (ignored when using fully unsupervised models)
trial_idxs : :obj:`array-like` of :obj:`int`
trial indices of base frames used for interpolation; if an entry is an integer, the
corresponding entry in `trials` must be `None`. This value is a trial index into all
*test* trials, and is not affected by how the test trials are shuffled. The `trials`
argument (see below) takes precedence over `trial_idxs`.
batch_idxs : :obj:`array-like` of :obj:`int`
batch indices of base frames used for interpolation; correspond to entries in `trial_idxs`
and `trials`
trials : :obj:`array-like` of :obj:`int`
        trials of base frames used for interpolation; if an entry is an integer, the
corresponding entry in `trial_idxs` must be `None`. This value is a trial index into all
possible trials (train, val, test), whereas `trial_idxs` is an index only into test trials
label_min_p : :obj:`float`, optional
lower percentile of training data used to compute range of traversal
label_max_p : :obj:`float`, optional
upper percentile of training data used to compute range of traversal
channel : :obj:`int`, optional
image channel to plot
sess_idx : :obj:`int`, optional
session index into data generator
n_frames : :obj:`int`, optional
number of frames (points) to display for traversal across latent dimensions; the movie
will display a traversal of `n_frames` across each dim, then another traversal of
`n_frames` in the opposite direction
n_buffer_frames : :obj:`int`, optional
number of blank frames to insert between base frames
crop_kwargs : :obj:`dict`, optional
        if not :obj:`None`, provides information about a fixed crop window applied to frames
keys : 'y_0', 'x_0', 'y_ext', 'x_ext'; window is
(y_0 - y_ext, y_0 + y_ext) in vertical direction and
(x_0 - x_ext, x_0 + x_ext) in horizontal direction
n_cols : :obj:`int`, optional
movie is `n_cols` panels wide
movie_kwargs : :obj:`dict`, optional
additional kwargs for individual panels; possible keys are 'markersize', 'markeredgecolor',
'markeredgewidth', and 'text_color'
panel_titles : :obj:`list` of :obj:`str`, optional
optional titles for each panel
order_idxs : :obj:`array-like`, optional
used to reorder panels (which are plotted in row-major order) if desired; can also be used
to choose a subset of latent dimensions to include
split_movies : :obj:`bool`, optional
True to save a separate latent traversal movie for each latent dimension
save_file : :obj:`str`, optional
absolute path of save file; does not need file extension, will automatically be saved as
mp4. To save as a gif, include the '.gif' file extension in `save_file`
kwargs
arguments are keys of `hparams`, for example to set `train_frac`, `rng_seed_model`, etc.
"""
panel_titles = [''] * (n_labels + n_ae_latents) if panel_titles is None else panel_titles
hparams = _get_psvae_hparams(
model_class=model_class, alpha=alpha, beta=beta, gamma=gamma, n_ae_latents=n_ae_latents,
experiment_name=experiment_name, rng_seed_model=rng_seed_model, **kwargs)
if model_class == 'cond-ae-msp' or model_class == 'ps-vae':
hparams['n_ae_latents'] += n_labels
# programmatically fill out other hparams options
get_lab_example(hparams, lab, expt)
hparams['animal'] = animal
hparams['session'] = session
hparams['session_dir'], sess_ids = get_session_dir(hparams)
hparams['expt_dir'] = get_expt_dir(hparams)
_, version = experiment_exists(hparams, which_version=True)
model_ae, data_generator = get_best_model_and_data(hparams, Model=None, version=version)
# get latent/label info
latent_range = get_input_range(
'latents', hparams, model=model_ae, data_gen=data_generator, min_p=15, max_p=85,
version=version)
label_range = get_input_range(
'labels', hparams, sess_ids=sess_ids, sess_idx=sess_idx,
min_p=label_min_p, max_p=label_max_p)
# ----------------------------------------
# collect frames/latents/labels
# ----------------------------------------
    # scaled labels and 2d labels are not needed for any of the supported model classes
    csl = False
    c2dl = False
ims_pt = []
ims_np = []
latents_np = []
labels_pt = []
labels_np = []
# labels_2d_pt = []
# labels_2d_np = []
for trial, trial_idx in zip(trials, trial_idxs):
ims_pt_, ims_np_, latents_np_, labels_pt_, labels_np_, labels_2d_pt_, labels_2d_np_ = \
get_model_input(
data_generator, hparams, model_ae, trial_idx=trial_idx, trial=trial,
compute_latents=True, compute_scaled_labels=csl, compute_2d_labels=c2dl,
max_frames=200)
ims_pt.append(ims_pt_)
ims_np.append(ims_np_)
latents_np.append(latents_np_)
labels_pt.append(labels_pt_)
labels_np.append(labels_np_)
# labels_2d_pt.append(labels_2d_pt_)
# labels_2d_np.append(labels_2d_np_)
if hparams['model_class'] == 'ps-vae':
label_idxs = np.arange(n_labels)
latent_idxs = n_labels + np.arange(n_ae_latents)
elif hparams['model_class'] == 'vae':
label_idxs = []
latent_idxs = np.arange(hparams['n_ae_latents'])
elif hparams['model_class'] == 'cond-vae':
label_idxs = np.arange(n_labels)
latent_idxs = np.arange(hparams['n_ae_latents'])
else:
raise Exception
# ----------------------------------------
# label traversals
# ----------------------------------------
ims_all = []
txt_strs_all = []
txt_strs_titles = []
for label_idx in label_idxs:
ims = []
txt_strs = []
for b, batch_idx in enumerate(batch_idxs):
if hparams['model_class'] == 'ps-vae':
points = np.array([latents_np[b][batch_idx, :]] * 3)
elif hparams['model_class'] == 'cond-vae':
points = np.array([labels_np[b][batch_idx, :]] * 3)
else:
raise Exception
points[0, label_idx] = label_range['min'][label_idx]
points[1, label_idx] = label_range['max'][label_idx]
points[2, label_idx] = label_range['min'][label_idx]
ims_curr, inputs = interpolate_point_path(
'labels', model_ae, ims_pt[b][None, batch_idx, :],
labels_np[b][None, batch_idx, :], points=points, n_frames=n_frames, ch=channel,
crop_kwargs=crop_kwargs)
ims.append(ims_curr)
txt_strs += [panel_titles[label_idx] for _ in range(len(ims_curr))]
if label_idx == 0:
tmp = trial_idxs[b] if trial_idxs[b] is not None else trials[b]
txt_strs_titles += [
'base frame %02i-%02i' % (tmp, batch_idx) for _ in range(len(ims_curr))]
# add blank frames
if len(batch_idxs) > 1:
y_pix, x_pix = ims_curr[0].shape
ims.append([np.zeros((y_pix, x_pix)) for _ in range(n_buffer_frames)])
txt_strs += ['' for _ in range(n_buffer_frames)]
if label_idx == 0:
txt_strs_titles += ['' for _ in range(n_buffer_frames)]
ims_all.append(np.vstack(ims))
txt_strs_all.append(txt_strs)
# ----------------------------------------
# latent traversals
# ----------------------------------------
crop_kwargs_ = None
for latent_idx in latent_idxs:
ims = []
txt_strs = []
for b, batch_idx in enumerate(batch_idxs):
points = np.array([latents_np[b][batch_idx, :]] * 3)
# points[:, latent_idxs] = 0
points[0, latent_idx] = latent_range['min'][latent_idx]
points[1, latent_idx] = latent_range['max'][latent_idx]
points[2, latent_idx] = latent_range['min'][latent_idx]
if hparams['model_class'] == 'vae':
labels_curr = None
else:
labels_curr = labels_np[b][None, batch_idx, :]
ims_curr, inputs = interpolate_point_path(
'latents', model_ae, ims_pt[b][None, batch_idx, :],
labels_curr, points=points, n_frames=n_frames, ch=channel,
crop_kwargs=crop_kwargs_)
ims.append(ims_curr)
if hparams['model_class'] == 'cond-vae':
txt_strs += [panel_titles[latent_idx + n_labels] for _ in range(len(ims_curr))]
else:
txt_strs += [panel_titles[latent_idx] for _ in range(len(ims_curr))]
if latent_idx == 0 and len(label_idxs) == 0:
# add frame ids here if skipping labels
tmp = trial_idxs[b] if trial_idxs[b] is not None else trials[b]
txt_strs_titles += [
'base frame %02i-%02i' % (tmp, batch_idx) for _ in range(len(ims_curr))]
# add blank frames
if len(batch_idxs) > 1:
y_pix, x_pix = ims_curr[0].shape
ims.append([np.zeros((y_pix, x_pix)) for _ in range(n_buffer_frames)])
txt_strs += ['' for _ in range(n_buffer_frames)]
if latent_idx == 0 and len(label_idxs) == 0:
txt_strs_titles += ['' for _ in range(n_buffer_frames)]
ims_all.append(np.vstack(ims))
txt_strs_all.append(txt_strs)
# ----------------------------------------
# make video
# ----------------------------------------
if order_idxs is None:
# don't change order of latents
order_idxs = np.arange(len(ims_all))
if split_movies:
for idx in order_idxs:
if save_file.split('.')[-1] == 'gif':
save_file_new = save_file[:-4] + '_latent-%i.gif' % idx
elif save_file.split('.')[-1] == 'mp4':
save_file_new = save_file[:-4] + '_latent-%i.mp4' % idx
else:
                save_file_new = save_file + '_latent-%i' % idx
make_interpolated(
ims=ims_all[idx],
text=txt_strs_all[idx],
text_title=txt_strs_titles,
save_file=save_file_new, scale=3, **movie_kwargs)
else:
make_interpolated_multipanel(
ims=[ims_all[i] for i in order_idxs],
text=[txt_strs_all[i] for i in order_idxs],
text_title=txt_strs_titles,
save_file=save_file, scale=2, n_cols=n_cols, **movie_kwargs)
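# A minimal usage sketch for make_latent_traversal_movie; all values are illustrative. Each
# entry of `trials` is None because the corresponding `trial_idxs` entry is an integer (the
# two arguments are mutually exclusive per entry):
#
#   make_latent_traversal_movie(
#       'musall', 'vistrained', 'mSM36', '05-Dec-2017', model_class='ps-vae',
#       alpha=1000, beta=5, gamma=500, n_ae_latents=2, rng_seed_model=0,
#       experiment_name='ps-vae-example', n_labels=4,
#       trial_idxs=[1, 2], batch_idxs=[1, 1], trials=[None, None],
#       n_frames=10, save_file='/tmp/traversal_movie')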
```
#### File: behavenet/psvae_utils/cond_ae.py
```python
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
import os
import torch
from torch import from_numpy
from behavenet import get_user_dir
from behavenet.fitting.eval import get_reconstruction
from behavenet.fitting.utils import get_best_model_and_data
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_session_dir
from behavenet.fitting.utils import get_lab_example
from behavenet.plotting import concat, save_movie
from behavenet.plotting.cond_ae_utils import get_crop
# --------------------------------------
# reconstruction movies
# --------------------------------------
def make_reconstruction_movie_wrapper(
hparams, save_file, model_info, trial_idxs=None, trials=None, sess_idx=0,
max_frames=400, xtick_locs=None, add_traces=False, label_names=None, frame_rate=15,
layout_pattern=None):
"""Produce movie with original video, reconstructed video, and residual, with optional traces.
This is a high-level function that loads the model described in the hparams dictionary and
produces the necessary predicted video frames.
Parameters
----------
hparams : :obj:`dict`
needs to contain enough information to specify an autoencoder
save_file : :obj:`str`
full save file (path and filename)
    model_info : :obj:`list` of :obj:`dict`
        one dict per model to reconstruct with; keys can include 'model_class',
        'experiment_name', 'version', and 'title'
    trial_idxs : :obj:`list`, optional
        list of test trials to construct videos from; if :obj:`NoneType`, use first test
        trial
    trials : :obj:`list`, optional
        list of trials (indexing all trials) to construct videos from; one of `trials` or
        `trial_idxs` should be specified
    sess_idx : :obj:`int`, optional
        session index into data generator
    xtick_locs : :obj:`array-like`, optional
        tick locations in units of bins
    add_traces : :obj:`bool`, optional
        add traces alongside reconstructions
label_names : :obj:`list`, optional
ps-vae label names
max_frames : :obj:`int`, optional
maximum number of frames to animate from a trial
frame_rate : :obj:`float`, optional
frame rate of saved movie
layout_pattern : :obj:`np.ndarray`
boolean array that determines where reconstructed frames are placed in a grid
"""
from behavenet.fitting.eval import get_reconstruction
from behavenet.fitting.utils import get_best_model_and_data
from behavenet.plotting.ae_utils import make_reconstruction_movie
from behavenet.plotting.cond_ae_utils import get_model_input
n_latents = hparams['n_ae_latents']
n_labels = hparams['n_labels']
expt_name = hparams.get('experiment_name', None)
# set up models to fit
titles = ['Original']
for model in model_info:
titles.append(model.get('title', ''))
# insert original video at front
model_info.insert(0, {'model_class': None})
ims_recon = [[] for _ in titles]
latents = [[] for _ in titles]
if trial_idxs is None:
trial_idxs = [None] * len(trials)
if trials is None:
trials = [None] * len(trial_idxs)
    if isinstance(sess_idx, int):
        sess_idx = sess_idx * np.ones((len(trials),), dtype='int')
for i, model in enumerate(model_info):
if i == 0:
continue
# further specify model
version = model.get('version', 'best')
hparams['experiment_name'] = model.get('experiment_name', expt_name)
hparams['model_class'] = model.get('model_class')
model_ae, data_generator = get_best_model_and_data(hparams, version=version)
# get images
for trial_idx, trial, s_idx in zip(trial_idxs, trials, sess_idx):
# get model inputs
ims_orig_pt, ims_orig_np, _, labels_pt, labels_np, labels_2d_pt, _ = get_model_input(
data_generator, hparams, model_ae, trial_idx=trial_idx, trial=trial,
sess_idx=s_idx, max_frames=max_frames, compute_latents=False,
compute_2d_labels=False)
# get model outputs
if hparams['model_class'] == 'labels-images':
ims_recon_tmp = get_reconstruction(model_ae, labels_pt)
latents_tmp = labels_np
else:
ims_recon_tmp, latents_tmp = get_reconstruction(
model_ae, ims_orig_pt, labels=labels_pt, labels_2d=labels_2d_pt,
return_latents=True, dataset=s_idx)
# orient to match labels
if hparams['model_class'] == 'ps-vae' or hparams['model_class'] == 'msps-vae':
latents_tmp[:, :n_labels] *= \
np.sign(model_ae.encoding.D.weight.data.cpu().detach().numpy())
ims_recon[i].append(ims_recon_tmp)
latents[i].append(latents_tmp)
# add a couple black frames to separate trials
final_trial = True
if (trial_idx is not None and (trial_idx != trial_idxs[-1])) or \
(trial is not None and (trial != trials[-1])):
final_trial = False
n_buffer = 5
if not final_trial:
_, n, y_p, x_p = ims_recon[i][-1].shape
ims_recon[i].append(np.zeros((n_buffer, n, y_p, x_p)))
latents[i].append(np.nan * np.zeros((n_buffer, latents[i][-1].shape[1])))
if i == 1: # deal with original frames only once
ims_recon[0].append(ims_orig_np)
latents[0].append([])
# add a couple black frames to separate trials
if not final_trial:
_, n, y_p, x_p = ims_recon[0][-1].shape
ims_recon[0].append(np.zeros((n_buffer, n, y_p, x_p)))
for i, (ims, zs) in enumerate(zip(ims_recon, latents)):
ims_recon[i] = np.concatenate(ims, axis=0)
latents[i] = np.concatenate(zs, axis=0)
if layout_pattern is None:
if len(titles) < 4:
n_rows, n_cols = 1, len(titles)
elif len(titles) == 4:
n_rows, n_cols = 2, 2
elif len(titles) > 4:
n_rows, n_cols = 2, 3
else:
raise ValueError('too many models')
else:
assert np.sum(layout_pattern) == len(ims_recon)
n_rows, n_cols = layout_pattern.shape
count = 0
for pos_r in layout_pattern:
for pos_c in pos_r:
if not pos_c:
ims_recon.insert(count, [])
titles.insert(count, [])
count += 1
if add_traces:
make_reconstruction_movie_wtraces(
ims=ims_recon, latents=latents, titles=titles, xtick_locs=xtick_locs,
frame_rate_beh=hparams['frame_rate'], scale=0.3, label_names=label_names,
save_file=save_file, frame_rate=frame_rate)
else:
make_reconstruction_movie(
ims=ims_recon, titles=titles, n_rows=n_rows, n_cols=n_cols,
save_file=save_file, frame_rate=frame_rate)
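# A minimal usage sketch for the wrapper above; `hparams` is assumed to already specify the
# dataset, and the model/experiment names below are hypothetical:
#
#   model_info = [
#       {'model_class': 'ps-vae', 'experiment_name': 'ps-vae-example', 'title': 'PS-VAE'},
#       {'model_class': 'vae', 'experiment_name': 'vae-example', 'title': 'VAE'}]
#   make_reconstruction_movie_wrapper(
#       hparams, '/tmp/recon.mp4', model_info, trial_idxs=[1], sess_idx=0,
#       max_frames=200, add_traces=False, frame_rate=15)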
def make_reconstruction_movie_wtraces(
ims, latents, titles, xtick_locs, frame_rate_beh, scale=0.5, label_names=None,
save_file=None, frame_rate=None, show_residuals=True):
"""Inflexible function for plotting recons, residuals, and latents for several models."""
from matplotlib.gridspec import GridSpec
n_channels, y_pix, x_pix = ims[0].shape[1:]
n_time, n_ae_latents = latents[1].shape
n_rows = len(ims)
if show_residuals:
n_cols = 3
fig_width = 12
width_ratios = [1, 1, 2]
im_cols = 2
else:
n_cols = 2
fig_width = 9
width_ratios = [1, 2]
im_cols = 1
fig_height = 3 * n_rows
fig = plt.figure(figsize=(fig_width, fig_height))
gs = GridSpec(n_rows, n_cols, figure=fig, width_ratios=width_ratios)
axs = []
for c in range(n_cols):
for r in range(n_rows):
# col 0, then col 1
axs.append(fig.add_subplot(gs[r, c]))
for i, ax in enumerate(axs):
ax.set_yticks([])
if i == len(ims) or i == len(ims) * im_cols:
ax.set_axis_off() # assume original frames first, no latents
if i > len(ims) * im_cols:
ax.get_xaxis().set_tick_params(labelsize=12, direction='in')
elif i < len(ims) * im_cols:
ax.set_xticks([])
else:
ax.set_axis_off() # assume original frames first, no latents
    # set panel titles on the image and trace axes
fontsize = 12
idx = 0
for title in titles:
axs[idx].set_title(titles[idx], fontsize=fontsize)
idx += 1
# blank (legend)
idx += 1
if show_residuals:
for t in range(len(ims) - 1):
axs[idx].set_title('Residual')
idx += 1
# blank
idx += 1
# ps-vae latents
axs[idx].set_title('MSPS-VAE latents', fontsize=fontsize)
axs[idx].set_xticklabels([])
if xtick_locs is not None and frame_rate_beh is not None:
axs[idx].set_xticks(xtick_locs)
idx += 1
axs[idx].set_title('VAE latents', fontsize=fontsize)
if len(ims) > 3:
# take care of VAE ticks
axs[idx].set_xticklabels([])
if xtick_locs is not None and frame_rate_beh is not None:
axs[idx].set_xticks(xtick_locs)
# labels-images
idx += 1
axs[idx].set_title('Labels', fontsize=fontsize)
# properly label x-axis of final row
if xtick_locs is not None and frame_rate_beh is not None:
axs[idx].set_xticks(xtick_locs)
axs[idx].set_xticklabels((np.asarray(xtick_locs) / frame_rate_beh).astype('int'))
axs[idx].set_xlabel('Time (s)', fontsize=fontsize)
else:
axs[idx].set_xlabel('Time (bins)', fontsize=fontsize)
im_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
tr_kwargs = {'animated': True, 'linewidth': 2}
txt_kwargs = {
'fontsize': 10, 'horizontalalignment': 'left', 'verticalalignment': 'center'}
latents_ae_color = [0.2, 0.2, 0.2]
# -----------------
# labels
# -----------------
if label_names is not None:
idx = len(ims)
axs[idx].set_prop_cycle(None) # reset colors
for l, label in enumerate(label_names):
c = axs[idx]._get_lines.get_next_color()
y_val = l / (len(label_names) + 2) + 1 / (len(label_names) + 2)
axs[idx].plot(
[0.1, 0.15], [y_val, y_val], '-', color=c, transform=axs[idx].transAxes)
axs[idx].text(
0.17, y_val, label, color='k', transform=axs[idx].transAxes, **txt_kwargs)
time = np.arange(n_time)
# normalize traces
latents_sc = []
for zs in latents:
if len(zs) == 0:
latents_sc.append(None)
else:
means = np.nanmean(zs, axis=0)
stds = np.nanstd(zs, axis=0) / scale
latents_sc.append((zs - means) / stds)
# ims is a list of lists, each row is a list of artists to draw in the
# current frame; here we are just animating one artist, the image, in
# each frame
ims_ani = []
for i in range(n_time):
ims_curr = []
idx = 0
if i % 100 == 0:
print('processing frame %03i/%03i' % (i, n_time))
# -----------------
# behavioral videos
# -----------------
for idx in range(n_rows):
ims_tmp = ims[idx][i, 0] if n_channels == 1 else concat(ims[idx][i])
im = axs[idx].imshow(ims_tmp, **im_kwargs)
ims_curr.append(im)
# -----------------
# residuals
# -----------------
if show_residuals:
for idx in range(1, n_rows):
ims_tmp = ims[idx][i, 0] if n_channels == 1 else concat(ims[idx][i])
ims_og = ims[0][i, 0] if n_channels == 1 else concat(ims[0][i])
im = axs[n_rows + idx].imshow(ims_tmp - ims_og + 0.5, **im_kwargs)
ims_curr.append(im)
# -----------------
# traces
# -----------------
# latents over time
for idx in range(n_rows * im_cols + 1, n_rows * im_cols + n_rows):
axs[idx].set_prop_cycle(None) # reset colors
for latent in range(latents_sc[idx - n_rows * im_cols].shape[1]):
if idx == n_rows * im_cols + 1:
latents_color = axs[idx]._get_lines.get_next_color()
elif idx == n_rows * im_cols + 3:
# hack to get labels-images traces w/ colors
latents_color = axs[idx]._get_lines.get_next_color()
else:
latents_color = [0, 0, 0]
im = axs[idx].plot(
time[0:i + 1], latent + latents_sc[idx - n_rows * im_cols][0:i + 1, latent],
color=latents_color, alpha=0.7, **tr_kwargs)[0]
axs[idx].spines['top'].set_visible(False)
axs[idx].spines['right'].set_visible(False)
axs[idx].spines['left'].set_visible(False)
ims_curr.append(im)
ims_ani.append(ims_curr)
plt.tight_layout(pad=0)
ani = animation.ArtistAnimation(fig, ims_ani, blit=True, repeat_delay=1000)
save_movie(save_file, ani, frame_rate=frame_rate)
# --------------------------------------
# latent/label traversal functions
# --------------------------------------
def plot_frame_array_labels(
hparams, ims, plot_func, interp_func, crop_type, markers, save_outputs=False, **kwargs):
n_frames = len(ims[0])
marker_kwargs = {
'markersize': 20, 'markeredgewidth': 5, 'markeredgecolor': [1, 1, 0],
'fillstyle': 'none'}
if save_outputs:
save_file = os.path.join(
get_user_dir('fig'),
'ae', 'D=%02i_label-manipulation_%s_%s-crop.png' %
(hparams['n_ae_latents'], hparams['session'], crop_type))
else:
save_file = None
if plot_func.__name__ == 'plot_2d_frame_array':
"""plot generated frames and differences separately"""
# plot generated frames
if crop_type:
plot_func(
ims, markers=None, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
else:
plot_func(
ims, markers=markers, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
# plot differences
if interp_func.__name__ == 'interpolate_2d':
# use upper left corner as base frame for whole grid
base_im = ims[0][0]
ims_diff = [[None for _ in range(n_frames)] for _ in range(n_frames)]
for r, ims_list_y in enumerate(ims):
for c, im in enumerate(ims_list_y):
ims_diff[r][c] = 0.5 + (im - base_im)
else:
# use left-most column as base frame for each row
ims_diff = [[None for _ in range(n_frames)] for _ in range(len(ims))]
for r, ims_list_y in enumerate(ims):
for c, im in enumerate(ims_list_y):
ims_diff[r][c] = 0.5 + (im - ims[r][0]) # compare across rows
plot_func(
ims_diff, markers=markers, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
else:
"""plot generated frames and differences together"""
if crop_type:
plot_func(
ims, markers=None, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
else:
plot_func(
ims, markers=None, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
def plot_frame_array_latents(
hparams, ims, plot_func, interp_func, n_latents, crop_type, markers, save_outputs=False,
**kwargs):
n_frames = len(ims[0])
if crop_type:
marker_kwargs = {
'markersize': 30, 'markeredgewidth': 8, 'markeredgecolor': [1, 1, 0],
'fillstyle': 'none'}
else:
marker_kwargs = {
'markersize': 20, 'markeredgewidth': 5, 'markeredgecolor': [1, 1, 0],
'fillstyle': 'none'}
if save_outputs:
save_file = os.path.join(
get_user_dir('fig'),
'ae', 'D=%02i_latent-manipulation_%s_%s-crop.png' %
(hparams['n_ae_latents'], hparams['session'], crop_type))
else:
save_file = None
if plot_func.__name__ == 'plot_2d_frame_array':
"""plot generated frames and differences separately"""
# plot generated frames
if crop_type:
plot_func(ims, markers=None, marker_kwargs=marker_kwargs, save_file=save_file)
else:
plot_func(ims, markers=markers, marker_kwargs=marker_kwargs, save_file=save_file)
# plot differences
if n_latents == 2 and interp_func.__name__ == 'interpolate_2d':
# use top-most row as base frame for each column
ims_diff = [[None for _ in range(n_frames)] for _ in range(n_frames)]
for r, ims_list_y in enumerate(ims):
for c, im in enumerate(ims_list_y):
ims_diff[r][c] = 0.5 + (im - ims[0][c]) # compare across cols
plot_func(
ims_diff, markers=markers, marker_kwargs=marker_kwargs, save_file=save_file,
**kwargs)
# use left-most column as base frame for each row
ims_diff = [[None for _ in range(n_frames)] for _ in range(len(ims))]
for r, ims_list_y in enumerate(ims):
for c, im in enumerate(ims_list_y):
ims_diff[r][c] = 0.5 + (im - ims[r][0]) # compare across rows
plot_func(
ims_diff, markers=markers, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
else:
"""plot generated frames and differences together"""
if crop_type:
raise NotImplementedError
else:
plot_func(
ims, markers=None, marker_kwargs=marker_kwargs, save_file=save_file, **kwargs)
def get_cluster_prototype_ims(dataset, n_clusters, as_numpy=False):
import pickle
from sklearn.cluster import KMeans
# ----------------------
# load AE model
# ----------------------
if dataset == 'ibl':
lab = 'ibl'
expt = 'ephys'
iters = 200
frac = '0.5'
n_ae_latents = 6
elif dataset == 'dipoppa':
lab = 'dipoppa'
expt = 'pupil'
iters = 200
frac = '0.5'
n_ae_latents = 5
elif dataset == 'musall':
lab = 'musall'
expt = 'vistrained'
iters = 200
frac = '0.5'
n_ae_latents = 7
else:
raise Exception
# set model info
version = 'best' # test-tube version; 'best' finds the version with the lowest mse
sess_idx = 0 # when using a multisession, this determines which session is used
hparams = {
'data_dir': get_user_dir('data'),
'save_dir': get_user_dir('save'),
'experiment_name': 'iters-%i_frac-%s' % (iters, frac),
'model_class': 'vae',
'model_type': 'conv',
'n_ae_latents': n_ae_latents,
'rng_seed_data': 0,
'trial_splits': '8;1;1;0',
'train_frac': float(frac),
'rng_seed_model': 0,
'conditional_encoder': False,
}
# programmatically fill out other hparams options
get_lab_example(hparams, lab, expt)
hparams['session_dir'], sess_ids = get_session_dir(hparams)
hparams['expt_dir'] = get_expt_dir(hparams)
# build model(s)
if hparams['model_class'] == 'ae':
from behavenet.models import AE as Model
elif hparams['model_class'] == 'vae':
from behavenet.models import VAE as Model
else:
raise NotImplementedError
model_ae, data_generator = get_best_model_and_data(hparams, Model, version=version)
# ----------------------
# cluster latents
# ----------------------
# load latents
sess_id = str('%s_%s_%s_%s_latents.pkl' % (
hparams['lab'], hparams['expt'], hparams['animal'], hparams['session']))
filename = os.path.join(
hparams['expt_dir'], 'version_%i' % 0, sess_id)
if not os.path.exists(filename):
print('exporting latents...', end='')
from behavenet.fitting.eval import export_latents
export_latents(data_generator, model_ae)
print('done')
latent_dict = pickle.load(open(filename, 'rb'))
# get all test latents
dtype = 'test'
latents = []
trials = []
frames = []
for trial in latent_dict['trials'][dtype]:
ls = latent_dict['latents'][trial]
n_frames_batch = ls.shape[0]
latents.append(ls)
trials.append([trial] * n_frames_batch)
frames.append(np.arange(n_frames_batch))
# print('trial: %i, frames: %i' % (trial, n_frames_batch))
latents = np.concatenate(latents)
trials = np.concatenate(trials)
frames = np.concatenate(frames)
np.random.seed(0) # to reproduce clusters
kmeans = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)
distances = kmeans.fit_transform(latents)
clust_id = kmeans.predict(latents)
# ----------------------
# get representative example from each cluster
# ----------------------
example_idxs = []
trial_idxs = []
frame_idxs = []
ims = []
for clust in range(n_clusters):
# get any frame in this cluster
# frame_idx = np.where(clust_ids==clust)[0][0]
# get frame that is closest to cluster center
frame_idx = np.argmin(distances[:, clust])
example_idxs.append(frame_idx)
trial_curr = trials[frame_idx]
frame_curr = frames[frame_idx]
batch = data_generator.datasets[0][trial_curr]
if as_numpy:
im = batch['images'].cpu().detach().numpy()[frame_curr, 0]
else:
im = batch['images'][None, frame_curr]
trial_idxs.append(trial_curr)
frame_idxs.append(frame_curr)
ims.append(im)
return example_idxs, trial_idxs, frame_idxs, ims
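# A minimal usage sketch: cluster the VAE latents of the 'ibl' dataset into 8 clusters and
# return one representative frame per cluster (assumes the model and latents exist locally):
#
#   example_idxs, trial_idxs, frame_idxs, ims = get_cluster_prototype_ims(
#       'ibl', n_clusters=8, as_numpy=True)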
def interpolate_points(points, n_frames):
"""Scale arbitrary points"""
n_points = len(points)
if isinstance(n_frames, int):
n_frames = [n_frames] * (n_points - 1)
assert len(n_frames) == (n_points - 1)
inputs_list = []
for p in range(n_points - 1):
p0 = points[None, p]
p1 = points[None, p + 1]
p_vec = (p1 - p0) / n_frames[p]
for pn in range(n_frames[p]):
vec = p0 + pn * p_vec
inputs_list.append(vec)
return inputs_list
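# A toy example of interpolate_points with made-up values; note that each segment excludes
# its end point, so the final point in `points` is never returned:
#
#   >>> pts = np.array([[0.0, 0.0], [1.0, 2.0]])
#   >>> np.vstack(interpolate_points(pts, n_frames=2))
#   array([[0. , 0. ],
#          [0.5, 1. ]])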
def interpolate_point_path(
interp_type, model, ims_0, latents_0, labels_0, points, n_frames=10, ch=0,
crop_kwargs=None, apply_inverse_transform=True):
"""Return reconstructed images created by interpolating through multiple points.
Parameters
----------
interp_type : :obj:`str`
'latents' | 'labels'
model : :obj:`behavenet.models` object
autoencoder model
ims_0 : :obj:`np.ndarray`
base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
latents_0 : :obj:`np.ndarray`
base latents of shape (1, n_latents); these values will be used if
        `interp_type='labels'`, and they will be ignored if `interp_type='latents'`
(since `points` will be used)
labels_0 : :obj:`np.ndarray`
base labels of shape (1, n_labels); these values will be used if
        `interp_type='latents'`, and they will be ignored if `interp_type='labels'`
(since `points` will be used)
points : :obj:`list`
one entry for each point in path; each entry is an np.ndarray of shape (n_latents,)
n_frames : :obj:`int` or :obj:`array-like`
number of interpolation points between each point; can be an integer that is used
for all paths, or an array/list of length one less than number of points
ch : :obj:`int`, optional
        specify which channel of input images to return; if not an integer, all channels
        are concatenated into a single image
Returns
-------
:obj:`tuple`
- ims_list (:obj:`list` of :obj:`np.ndarray`) interpolated images
- inputs_list (:obj:`list` of :obj:`np.ndarray`) interpolated values
"""
if model.hparams.get('conditional_encoder', False):
raise NotImplementedError
n_points = len(points)
if isinstance(n_frames, int):
n_frames = [n_frames] * (n_points - 1)
assert len(n_frames) == (n_points - 1)
ims_list = []
inputs_list = []
for p in range(n_points - 1):
p0 = points[None, p]
p1 = points[None, p + 1]
p_vec = (p1 - p0) / n_frames[p]
for pn in range(n_frames[p]):
vec = p0 + pn * p_vec
if interp_type == 'latents':
if model.hparams['model_class'] == 'cond-ae' \
or model.hparams['model_class'] == 'cond-vae':
im_tmp = get_reconstruction(
model, vec, apply_inverse_transform=apply_inverse_transform,
labels=torch.from_numpy(labels_0).float().to(model.hparams['device']))
else:
im_tmp = get_reconstruction(
model, vec, apply_inverse_transform=apply_inverse_transform)
elif interp_type == 'labels':
if model.hparams['model_class'] == 'cond-ae-msp' \
or model.hparams['model_class'] == 'sss-vae':
im_tmp = get_reconstruction(
model, vec, apply_inverse_transform=True)
else: # cond-ae
im_tmp = get_reconstruction(
model, ims_0,
labels=torch.from_numpy(vec).float().to(model.hparams['device']))
else:
raise NotImplementedError
if crop_kwargs is not None:
if not isinstance(ch, int):
raise ValueError('"ch" must be an integer to use crop_kwargs')
ims_list.append(get_crop(
im_tmp[0, ch],
crop_kwargs['y_0'], crop_kwargs['y_ext'],
crop_kwargs['x_0'], crop_kwargs['x_ext']))
else:
if isinstance(ch, int):
ims_list.append(np.copy(im_tmp[0, ch]))
else:
ims_list.append(np.copy(concat(im_tmp[0])))
inputs_list.append(vec)
return ims_list, inputs_list
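# A minimal usage sketch: traverse a single latent dimension from its minimum to its maximum
# and back while holding the remaining dimensions fixed. All names used below (model_ae,
# ims_pt, latents_np, labels_np, latent_range, dim) are assumed to already exist:
#
#   base = latents_np[None, 0, :]
#   points = np.concatenate([base, base, base], axis=0)
#   points[0, dim] = latent_range['min'][dim]
#   points[1, dim] = latent_range['max'][dim]
#   points[2, dim] = latent_range['min'][dim]
#   ims_list, inputs_list = interpolate_point_path(
#       'latents', model_ae, ims_pt[None, 0, :], latents_np[None, 0, :],
#       labels_np[None, 0, :], points=points, n_frames=10, ch=0)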
def make_interpolated(
ims, save_file, markers=None, text=None, text_title=None, text_color=[1, 1, 1],
frame_rate=20, scale=3, markersize=10, markeredgecolor='w', markeredgewidth=1, ax=None):
n_frames = len(ims)
y_pix, x_pix = ims[0].shape
if ax is None:
fig_width = scale / 2
fig_height = y_pix / x_pix * scale / 2
fig = plt.figure(figsize=(fig_width, fig_height), dpi=300)
ax = plt.gca()
return_ims = False
else:
return_ims = True
ax.set_xticks([])
ax.set_yticks([])
default_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
txt_kwargs = {
'fontsize': 4, 'color': text_color, 'fontname': 'monospace',
'horizontalalignment': 'left', 'verticalalignment': 'center',
'transform': ax.transAxes}
# ims is a list of lists, each row is a list of artists to draw in the current frame; here we
# are just animating one artist, the image, in each frame
ims_ani = []
for i, im in enumerate(ims):
im_tmp = []
im_tmp.append(ax.imshow(im, **default_kwargs))
# [s.set_visible(False) for s in ax.spines.values()]
if markers is not None:
im_tmp.append(ax.plot(
markers[i, 0], markers[i, 1], '.r', markersize=markersize,
markeredgecolor=markeredgecolor, markeredgewidth=markeredgewidth)[0])
if text is not None:
im_tmp.append(ax.text(0.02, 0.06, text[i], **txt_kwargs))
if text_title is not None:
im_tmp.append(ax.text(0.02, 0.92, text_title[i], **txt_kwargs))
ims_ani.append(im_tmp)
if return_ims:
return ims_ani
else:
plt.tight_layout(pad=0)
ani = animation.ArtistAnimation(fig, ims_ani, blit=True, repeat_delay=1000)
save_movie(save_file, ani, frame_rate=frame_rate)
def make_interpolated_wdist(
ims, latents, points, save_file, xlim=None, ylim=None, frame_rate=20):
n_frames = len(ims)
y_pix, x_pix = ims[0].shape
n_channels = 1
scale_ = 4
fig_width = scale_ * n_channels
fig_height = y_pix / x_pix * scale_ / 2
fig, axes = plt.subplots(1, 2, figsize=(fig_width, fig_height), dpi=300)
# get rid of ticks on video panel
axes[1].set_xticks([])
axes[1].set_yticks([])
default_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
# ims is a list of lists, each row is a list of artists to draw in the current frame; here we
# are just animating one artist, the image, in each frame
ims_ani = []
for i, im in enumerate(ims):
frames_curr = []
im_tmp = axes[1].imshow(im, **default_kwargs)
frames_curr.append(im_tmp)
fr_tmp0 = axes[0].scatter(
latents[:, 0], latents[:, 1], c=[[0, 0, 0]], s=0.5, alpha=0.25,
linewidths=0)
# axes[0].invert_yaxis()
axes[0].set_xlabel('Left paw marker x\n(normalized)', fontsize=8)
axes[0].set_ylabel('Left paw marker y\n(normalized)', fontsize=8)
if xlim is not None:
axes[0].set_xlim(xlim)
if ylim is not None:
axes[0].set_ylim(ylim)
frames_curr.append(fr_tmp0)
fr_tmp1 = axes[0].plot(
points[:, 0], points[:, 1], 'sr', markersize=1, markeredgecolor='r')[0]
# axes[0].invert_yaxis()
frames_curr.append(fr_tmp1)
fr_tmp2 = axes[0].plot(
points[i, 0], points[i, 1], 'sr', markersize=3, markeredgecolor='r')[0]
# axes[0].invert_yaxis()
frames_curr.append(fr_tmp2)
ims_ani.append(frames_curr)
plt.tight_layout(pad=0)
ani = animation.ArtistAnimation(fig, ims_ani, blit=True, repeat_delay=1000)
save_movie(save_file, ani, frame_rate=frame_rate)
def make_interpolated_multipanel(
ims, save_file, markers=None, text=None, text_title=None,
frame_rate=20, n_cols=3, scale=1, **kwargs):
n_panels = len(ims)
markers = [None] * n_panels if markers is None else markers
text = [None] * n_panels if text is None else text
y_pix, x_pix = ims[0][0].shape
n_rows = int(np.ceil(n_panels / n_cols))
fig_width = scale / 2 * n_cols
fig_height = y_pix / x_pix * scale / 2 * n_rows
fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_width, fig_height), dpi=300)
plt.subplots_adjust(wspace=0, hspace=0, left=0, bottom=0, right=1, top=1)
# ims is a list of lists, each row is a list of artists to draw in the current frame; here we
# are just animating one artist, the image, in each frame
ims_ani = []
for i, (ims_curr, markers_curr, text_curr) in enumerate(zip(ims, markers, text)):
col = i % n_cols
row = int(np.floor(i / n_cols))
if i == 0:
text_title_str = text_title
else:
text_title_str = None
ims_ani_curr = make_interpolated(
ims=ims_curr, markers=markers_curr, text=text_curr, text_title=text_title_str,
ax=axes[row, col], save_file=None, **kwargs)
ims_ani.append(ims_ani_curr)
# turn off other axes
i += 1
while i < n_rows * n_cols:
col = i % n_cols
row = int(np.floor(i / n_cols))
axes[row, col].set_axis_off()
i += 1
# rearrange ims:
# currently a list of length n_panels, each element of which is a list of length n_t
# we need a list of length n_t, each element of which is a list of length n_panels
n_frames = len(ims_ani[0])
ims_final = [[] for _ in range(n_frames)]
for i in range(n_frames):
for j in range(n_panels):
ims_final[i] += ims_ani[j][i]
# plt.tight_layout(pad=0)
ani = animation.ArtistAnimation(fig, ims_final, blit=True, repeat_delay=1000)
save_movie(save_file, ani, frame_rate=frame_rate)
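# A minimal usage sketch (names are hypothetical): `ims_all` is a list of n_panels arrays,
# each of shape (n_t, y_pix, x_pix), and `txt_strs_all`/`txt_strs_titles` are per-frame
# strings, one panel per latent dimension:
#
#   make_interpolated_multipanel(
#       ims=ims_all, text=txt_strs_all, text_title=txt_strs_titles,
#       save_file='/tmp/traversal_multipanel.mp4', n_cols=3, scale=2)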
# --------------------------------------
# disentangling functions
# --------------------------------------
def compute_latent_std(hparams, version, sess_id=0, dtype='val'):
import pickle
# load latents
sess_id = str('%s_%s_%s_%s_latents.pkl' % (
hparams['lab'], hparams['expt'], hparams['animal'], hparams['session']))
filename = os.path.join(
hparams['expt_dir'], 'version_%i' % version, sess_id)
if not os.path.exists(filename):
# print('exporting latents...', end='')
# export_latents(data_generator, model_ae)
# print('done')
raise NotImplementedError
latent_dict = pickle.load(open(filename, 'rb'))
print('loaded latents from %s' % filename)
# get all test latents
latents = []
for trial in latent_dict['trials'][dtype]:
ls = latent_dict['latents'][trial]
latents.append(ls)
latents = np.concatenate(latents)
return np.std(latents, axis=0)
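# A minimal usage sketch; assumes latents were previously exported to
# '<expt_dir>/version_<version>/<lab>_<expt>_<animal>_<session>_latents.pkl':
#
#   latents_std = compute_latent_std(hparams, version=0, dtype='val')
#   print(latents_std.shape)  # (n_latents,) - one standard deviation per latent dimension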
def compute_metric(
ims, model, n_ae_latents, latents_std, std_range=1, L=20, dist='uniform'):
# L: number of random samples in latent space
if model.hparams['model_class'] == 'sss-vae':
n_labels = model.hparams.get('n_labels', 0)
else:
n_labels = 0
lowest_vars = np.zeros((n_ae_latents, n_ae_latents))
for im in ims:
# step 4: push a sample frame $x_k$ through the encoder
if model.hparams['model_class'] == 'sss-vae':
y, w, logvar, pi, outsize = model.encoding(im.to(model.hparams['device']))
y_np = y.cpu().detach().numpy()
w_np = w.cpu().detach().numpy()
logvar_np = logvar.cpu().detach().numpy()
elif model.hparams['model_class'] == 'beta-tcvae':
mu, logvar, pi, outsize = model.encoding(im.to(model.hparams['device']))
mu_np = mu.cpu().detach().numpy()
logvar_np = logvar.cpu().detach().numpy()
elif model.hparams['model_class'] == 'cond-vae':
# z_hat_, _, _, _ = model.encoding(im.to(model.hparams['device']))
# z_hat = z_hat_.cpu().detach().numpy()
raise NotImplementedError
else:
raise NotImplementedError
for d in range(n_ae_latents):
w_hats = np.zeros((L, n_ae_latents))
for l in range(L):
# step 5: fix latent dim $d$ and sample the remaining dims from a
# uniform/normal/posterior distribution
idxs_ = np.arange(n_ae_latents + n_labels)
idxs = idxs_[idxs_ != (n_labels + d)]
# sample all but dim $d$ (super+unsupervised dims)
if model.hparams['model_class'] == 'sss-vae':
# sample all unsupervised dims but dim $d$
# n_idxs = np.concatenate([np.arange(n_labels), [n_labels + d]])
# idxs = np.where(~np.in1d(idxs_, n_idxs))[0]
z_hat_np = np.concatenate([y_np, w_np], axis=1).astype('float32')
else:
z_hat_np = np.copy(mu_np)
# uniform distribution
if dist == 'uniform':
# sample latents from range [-std_range*s, +std_range*s]
latents_range = 2 * std_range * latents_std
latents_offset = -std_range * latents_std
eps = np.random.random((1, n_ae_latents + n_labels))
sample = latents_range * eps + latents_offset
elif dist == 'normal':
eps = np.random.randn(1, n_ae_latents + n_labels)
sample = latents_std * np.sqrt(std_range) * eps
elif dist == 'posterior':
eps = np.random.randn(1, n_ae_latents + n_labels)
sample = z_hat_np + np.sqrt(np.exp(logvar_np)) * eps
else:
raise NotImplementedError
# update latents with sampled values
z_hat_np[0, idxs] = sample[0, idxs]
# step 6: push this new latent vector through the decoder and back
# through the encoder to get the updated latent vector
z_hat = from_numpy(z_hat_np).to(model.hparams['device'])
im_hat = model.decoding(z_hat, pi, outsize)
if model.hparams['model_class'] == 'sss-vae':
_, w_hat, _, _, _ = model.encoding(im_hat)
elif model.hparams['model_class'] == 'beta-tcvae':
w_hat, _, _, _ = model.encoding(im_hat)
else:
raise NotImplementedError
w_hats[l, :] = w_hat.cpu().detach().numpy()
# step 8: divide the $L$ latent representations by their standard deviation $s$
w_hats /= latents_std[n_labels:]
# step 9: record the dimension with the smallest variance across the $L$ samples
idx_min_var = np.argmin(np.var(w_hats, axis=0))
lowest_vars[d, idx_min_var] += 1
# lowest_vars[d] += np.var(w_hats, axis=0)
error_rate = 1 - np.sum(np.diag(lowest_vars)) / (len(ims) * n_ae_latents)
return lowest_vars, error_rate
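# --------------------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original API): compute per-dimension latent
# standard deviations on validation data, then score disentanglement. `hparams`,
# `version`, `model`, and the list of frames `ims` are assumed to exist.
#     latents_std = compute_latent_std(hparams, version, dtype='val')
#     n_labels = model.hparams.get('n_labels', 0)
#     lowest_vars, error_rate = compute_metric(
#         ims, model, n_ae_latents=len(latents_std) - n_labels,
#         latents_std=latents_std, std_range=2, L=20, dist='uniform')
# --------------------------------------------------------------------------------------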
def compute_metric_scan(
n_scans, model, n_ae_latents, latents_std, std_range=1, L=20, dist='uniform'):
# L: number of random samples in latent space
n_labels = model.hparams.get('n_labels', 0)
ranges = [np.linspace(-std_range * s, std_range * s, n_scans) for s in latents_std]
lowest_vars = np.zeros((n_ae_latents, n_ae_latents))
for s in range(n_scans):
for d in range(n_ae_latents):
w_hats = np.zeros((L, n_ae_latents))
for l in range(L):
if dist == 'uniform':
# sample latents from range [-std_range*s, +std_range*s]
latents_range = 2 * std_range * latents_std
latents_offset = -std_range * latents_std
eps = np.random.random((1, n_ae_latents + n_labels))
sample = latents_range * eps + latents_offset
elif dist == 'normal':
eps = np.random.randn(1, n_ae_latents + n_labels)
sample = latents_std * np.sqrt(std_range) * eps
else:
raise NotImplementedError
# update latents with sampled values
z_hat_np = sample
z_hat_np[0, d] = ranges[d][s]
# step 6: push this new latent vector through the decoder and back
# through the encoder to get the updated latent vector
z_hat = from_numpy(z_hat_np.astype('float32')).to(model.hparams['device'])
im_hat = model.decoding(z_hat, None, None)
_, w_hat, _, _, _ = model.encoding(im_hat)
w_hats[l, :] = w_hat.cpu().detach().numpy()
# step 8: divide the $L$ latent representations by their standard deviation $s$
w_hats /= latents_std[n_labels:]
# step 9: record the dimension with the smallest variance across the $L$ samples
idx_min_var = np.argmin(np.var(w_hats, axis=0))
lowest_vars[d, idx_min_var] += 1
# lowest_vars[d] += np.var(w_hats, axis=0)
error_rate = 1 - np.sum(np.diag(lowest_vars)) / (n_scans * n_ae_latents)
return lowest_vars, error_rate
def compute_metric_traversal(
ims, model, n_ae_latents, latents_std, latent_range, label_range=None, L=20):
# L: number of random samples in latent space
if model.hparams['model_class'] == 'sss-vae':
n_labels = model.hparams.get('n_labels', 0)
else:
n_labels = 0
mses = np.zeros((n_ae_latents, n_ae_latents, len(ims)))
for i, im in enumerate(ims):
# step 4: push a sample frame $x_k$ through the encoder
if model.hparams['model_class'] == 'sss-vae':
y, w, logvar, pi, outsize = model.encoding(im.to(model.hparams['device']))
y_np = y.cpu().detach().numpy()
w_np = w.cpu().detach().numpy()
logvar_np = logvar.cpu().detach().numpy()
elif model.hparams['model_class'] == 'beta-tcvae':
mu, logvar, pi, outsize = model.encoding(im.to(model.hparams['device']))
mu_np = mu.cpu().detach().numpy()
logvar_np = logvar.cpu().detach().numpy()
elif model.hparams['model_class'] == 'cond-vae':
# z_hat_, _, _, _ = model.encoding(im.to(model.hparams['device']))
# z_hat = z_hat_.cpu().detach().numpy()
raise NotImplementedError
else:
raise NotImplementedError
# compute change in reconstructed z when manipulating single dim of original z
for d in range(n_ae_latents):
if model.hparams['model_class'] == 'sss-vae':
z_hat_np = np.concatenate([y_np, w_np], axis=1).astype('float32')
else:
z_hat_np = np.copy(mu_np)
points = np.array([z_hat_np] * 2)
if d < n_labels:
points[0, d] = label_range['min'][d]
points[1, d] = label_range['max'][d]
ims_re, inputs = interpolate_point_path(
'labels', model, im.to(model.hparams['device']), None, None, points=points,
n_frames=L)
else:
points[0, d] = latent_range['min'][d]
points[1, d] = latent_range['max'][d]
ims_re, inputs = interpolate_point_path(
'latents', model, im.to(model.hparams['device']), None, None, points=points,
n_frames=L)
zs_og = np.vstack(inputs)
inputs_re = get_latents(ims_re, model)
zs_re = np.vstack(inputs_re)
mses[d, :, i] = np.mean(np.square(zs_og - zs_re)) / (latents_std ** 2)
return np.mean(mses, axis=2)
def get_latents(ims, model):
use_mean = True
dataset = 0
zs = []
for im in ims:
# _, latents = get_reconstruction(model, im[None, None, ...], return_latents=True)
if not isinstance(im, torch.Tensor):
im = torch.Tensor(im[None, None, ...])
if model.hparams['model_class'] == 'cond-ae-msp':
ims_recon, latents, _ = model(im, dataset=dataset)
elif model.hparams['model_class'] == 'vae' \
or model.hparams['model_class'] == 'beta-tcvae':
ims_recon, latents, _, _ = model(im, dataset=dataset, use_mean=use_mean)
elif model.hparams['model_class'] == 'sss-vae':
ims_recon, _, latents, _, yhat = model(im, dataset=dataset, use_mean=use_mean)
        elif model.hparams['model_class'] == 'cond-ae':
            # note: `labels` / `labels_2d` are not defined in this helper; the conditional
            # branches below would need them passed in before they can be used
            ims_recon, latents = model(im, dataset=dataset, labels=labels, labels_2d=labels_2d)
        elif model.hparams['model_class'] == 'cond-vae':
            ims_recon, latents, _, _ = model(im, dataset=dataset, labels=labels,
                                             labels_2d=labels_2d)
else:
raise ValueError('Invalid model class %s' % model.hparams['model_class'])
latents = latents.cpu().detach().numpy()
if model.hparams['model_class'] == 'sss-vae':
yhat = yhat.cpu().detach().numpy()
latents[:, :model.hparams['n_labels']] = yhat
zs.append(latents)
return zs
```
#### File: behavenet/psvae_utils/ssmutils.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
import pickle
from ssm import HMM
from ssm.messages import forward_pass
from scipy.special import logsumexp
from sklearn.metrics import r2_score
# -------------------------------------------------------------------------------------------------
# model fitting functions
# -------------------------------------------------------------------------------------------------
def collect_model_kwargs(
n_lags_standard, n_lags_sticky, n_lags_recurrent, kappas, observations,
observation_kwargs={}, hierarchical=False, fit_hmm=False):
"""Collect model kwargs.
Args:
n_lags_standard (array-like): number of ar lags for standard transitions
n_lags_sticky (array-like): number of ar lags for sticky transitions
n_lags_recurrent (array-like): number of ar lags for recurrent transitions
kappas (array-like): hyperparam for upweighting diagonal when using sticky transitions
observations (str): 'ar' | 'diagonal_ar' | 'robust_ar' | 'diagonal_robust_ar'
observation_kwargs (dict): additional kwargs for obs (e.g. tags for hierarchical models)
hierarchical (bool): True to fit model with hierarchical observations
fit_hmm (bool): True to include hmm in collected models
Returns:
dict
"""
model_kwargs = {}
if hierarchical:
if len(n_lags_recurrent) > 0 or len(n_lags_sticky) > 0:
raise NotImplementedError('Cannot fit hierarchical models on recurrent or sticky obs')
hier_str = 'hierarchical_'
else:
hier_str = ''
# add hmms with standard transitions
if fit_hmm:
model_kwargs['hmm'] = {
'transitions': 'standard',
'observations': hier_str + 'gaussian',
'observation_kwargs': observation_kwargs}
# add models with standard transitions
for lags in n_lags_standard:
model_kwargs['arhmm-%i' % lags] = {
'transitions': 'standard',
'observations': hier_str + observations,
'observation_kwargs': {**{'lags': lags}, **observation_kwargs}}
# add models with sticky transitions
for lags in n_lags_sticky:
for kappa in kappas:
kap = int(np.log10(kappa))
model_kwargs['arhmm-s%i-%i' % (kap, lags)] = {
'transitions': 'sticky',
'transition_kwargs': {'kappa': kappa},
'observations': hier_str + observations,
'observation_kwargs': {**{'lags': lags}, **observation_kwargs}}
# add models with recurrent transitions
for lags in n_lags_recurrent:
model_kwargs['rarhmm-%i' % lags] = {
'transitions': 'recurrent',
'observations': hier_str + observations,
'observation_kwargs': {**{'lags': lags}, **observation_kwargs}}
return model_kwargs
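# Example (sketch): collect kwargs for a few standard and sticky ARHMMs; each entry of the
# returned dict holds transition/observation settings for constructing an ssm.HMM.
#     model_kwargs = collect_model_kwargs(
#         n_lags_standard=[1, 2], n_lags_sticky=[2], n_lags_recurrent=[],
#         kappas=[1e4], observations='ar')
#     # -> keys: 'arhmm-1', 'arhmm-2', 'arhmm-s4-2'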
def fit_with_random_restarts(
K, D, obs, lags, datas, transitions='stationary', tags=None, num_restarts=5, num_iters=100,
method='em', tolerance=1e-4, save_path=None, init_type='kmeans', dist_mat=None,
cond_var_A=1e-3, cond_var_V=1e-3, cond_var_b=1e-1, **kwargs):
all_models = []
all_lps = []
if not os.path.exists(save_path):
os.makedirs(save_path)
# Fit the model with a few random restarts
for r in range(num_restarts):
print("Restart ", r)
np.random.seed(r)
# build model file
model_kwargs = {
'transitions': transitions,
'observations': obs,
'observation_kwargs': {'lags': lags},
}
model_name = get_model_name(K, model_kwargs)
save_file = os.path.join(save_path, model_name + '_init-%i.pkl' % r)
print(save_file)
if os.path.exists(save_file):
print('loading results from %s' % save_file)
with open(save_file, 'rb') as f:
results = pickle.load(f)
model = results['model']
lps = results['lps']
else:
observation_kwargs = dict(lags=lags)
if obs.find('hierarchical') > -1:
observation_kwargs['cond_variance_A'] = cond_var_A
observation_kwargs['cond_variance_V'] = cond_var_V
observation_kwargs['cond_variance_b'] = cond_var_b
observation_kwargs['cond_dof_Sigma'] = 10
observation_kwargs['tags'] = np.unique(tags)
if transitions.find('hierarchical') > -1:
transition_kwargs = {'tags': np.unique(tags)}
else:
transition_kwargs = None
model = HMM(
K, D,
observations=obs, observation_kwargs=observation_kwargs,
transitions=transitions, transition_kwargs=transition_kwargs)
init_model(init_type, model, datas, dist_mat=dist_mat)
lps = model.fit(
datas, tags=tags, method=method, tolerance=tolerance,
num_iters=num_iters, # em
# num_epochs=num_iters, # stochastic em
initialize=False,
**kwargs)
results = {'model': model, 'lps': lps}
with open(save_file, 'wb') as f:
pickle.dump(results, f)
all_models.append(model)
all_lps.append(lps)
if isinstance(lps, tuple):
best_model_idx = np.argmax([lps[0][-1] for lps in all_lps])
else:
best_model_idx = np.argmax([lps[-1] for lps in all_lps])
best_model = all_models[best_model_idx]
best_lps = all_lps[best_model_idx]
return best_model, best_lps, all_models, all_lps
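# Usage sketch (the data list and save path are illustrative placeholders):
#     best_model, best_lps, all_models, all_lps = fit_with_random_restarts(
#         K=8, D=datas[0].shape[1], obs='ar', lags=2, datas=datas,
#         num_restarts=3, num_iters=50, save_path='/tmp/arhmm_fits', init_type='kmeans')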
def init_model(init_type, model, datas, inputs=None, masks=None, tags=None, dist_mat=None):
"""Initialize ARHMM model according to one of several schemes.
The different schemes correspond to different ways of assigning discrete states to the data
points; once these states have been assigned, linear regression is used to estimate the model
parameters (dynamics matrices, biases, covariance matrices)
* init_type = random: states are randomly and uniformly assigned
* init_type = kmeans: perform kmeans clustering on data; note that this is not a great scheme
for arhmms on the fly data, because the fly is often standing still in many different
poses. These poses will be assigned to different clusters, thus breaking the "still" state
into many initial states
    * init_type = kmeans-diff: perform kmeans clustering on differenced data
    * init_type = pca-me: first compute the motion energy of the data (square of differences of
      consecutive time points) and then perform PCA. A threshold applied to the first dimension
      does a reasonable job of separating the data into "moving" and "still" timepoints. All
      "still" timepoints are assigned one state, and the remaining timepoints are clustered
      using kmeans with (K-1) clusters
    * init_type = arhmm: refinement of the pca-me approach: perform pca on the data and take
      the top 4 components (to speed up computation), then fit a 2-state arhmm to roughly
      split the data into "still" and "moving" states (this is itself initialized with
      pca-me). As before, the moving state is then clustered into K-1 states using kmeans.
    Args:
        init_type (str):
            'random' | 'kmeans' | 'kmeans-diff' | 'kmeans-move' | 'umap-kmeans' |
            'umap-kmeans-diff' | 'ar-clust' | 'pca-me' | 'arhmm'
        model (ssm.HMM object):
        datas (list of np.ndarrays):
        inputs (list of np.ndarrays):
        masks (list of np.ndarrays):
        tags (list of np.ndarrays):
        dist_mat (np.ndarray, optional): precomputed segment distance matrix for 'ar-clust'
"""
from ssm.util import one_hot
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from scipy.signal import savgol_filter
from scipy.stats import norm
Ts = [data.shape[0] for data in datas]
K = model.K
D = model.observations.D
M = model.observations.M
lags = model.observations.lags
if inputs is None:
inputs = [np.zeros((data.shape[0],) + (M,)) for data in datas]
elif not isinstance(inputs, (list, tuple)):
inputs = [inputs]
if masks is None:
masks = [np.ones_like(data, dtype=bool) for data in datas]
elif not isinstance(masks, (list, tuple)):
masks = [masks]
if tags is None:
tags = [None] * len(datas)
elif not isinstance(tags, (list, tuple)):
tags = [tags]
# --------------------------
# initialize discrete states
# --------------------------
if init_type == 'random':
zs = [np.random.choice(K, size=T) for T in Ts]
elif init_type == 'umap-kmeans':
import umap
u = umap.UMAP()
xs = u.fit_transform(np.vstack(datas))
km = KMeans(K)
km.fit(xs)
zs = np.split(km.labels_, np.cumsum(Ts)[:-1])
elif init_type == 'umap-kmeans-diff':
import umap
u = umap.UMAP()
datas_diff = [np.vstack([np.zeros((1, D)), np.diff(data, axis=0)]) for data in datas]
xs = u.fit_transform(np.vstack(datas_diff))
km = KMeans(K)
km.fit(xs)
zs = np.split(km.labels_, np.cumsum(Ts)[:-1])
elif init_type == 'kmeans':
km = KMeans(K)
km.fit(np.vstack(datas))
zs = np.split(km.labels_, np.cumsum(Ts)[:-1])
elif init_type == 'kmeans-diff':
km = KMeans(K)
datas_diff = [np.vstack([np.zeros((1, D)), np.diff(data, axis=0)]) for data in datas]
km.fit(np.vstack(datas_diff))
zs = np.split(km.labels_, np.cumsum(Ts)[:-1])
elif init_type == 'kmeans-move':
D_ = 4
if datas[0].shape[1] > D_:
# perform pca
pca = PCA(D_)
xs = pca.fit_transform(np.vstack(datas))
xs = np.split(xs, np.cumsum(Ts)[:-1])
else:
# keep original data
import copy
D_ = D
xs = copy.deepcopy(datas)
model_init = HMM(
K=2, D=D_, M=0, transitions='standard', observations='ar',
            observation_kwargs={'lags': 1})
init_model('pca-me', model_init, xs)
model_init.fit(
xs, inputs=None, method='em', num_iters=100, tolerance=1e-2,
initialize=False, transitions_mstep_kwargs={'optimizer': 'lbfgs', 'tol': 1e-3})
# make still state 0th state
mses = [np.mean(np.square(model_init.observations.As[i] - np.eye(D_))) for i in range(2)]
if mses[1] < mses[0]:
# permute states
model_init.permute([1, 0])
moving_state = 1
inputs_tr = [None] * len(datas)
zs = [model_init.most_likely_states(x, u) for x, u in zip(xs, inputs_tr)]
zs = np.concatenate(zs, axis=0)
# cluster moving data
km = KMeans(K - 1)
if np.sum(zs == moving_state) > K - 1:
datas_diff = [np.vstack([np.zeros((1, D)), np.diff(data, axis=0)]) for data in datas]
km.fit(np.vstack(datas_diff)[zs == moving_state])
zs[zs == moving_state] = km.labels_ + 1
# split
zs = np.split(zs, np.cumsum(Ts)[:-1])
elif init_type == 'ar-clust':
from sklearn.cluster import SpectralClustering # , AgglomerativeClustering
# code from <NAME>
t_win = 5
t_gap = 5
num_trials = len(datas)
if dist_mat is None:
dist_mat = compute_dist_mat(datas, t_win, t_gap)
# Cluster!
clustering = SpectralClustering(n_clusters=K, affinity='precomputed').fit(
1 / (1 + dist_mat / t_win))
# Now take the clustered segments, and use them to determine the cluster of the individual
# time points
# In the scenario where the segments are nonoverlapping, then we can simply assign the time
# point cluster as its segment cluster
# In the scenario where the segments are overlapping, we will let a time point's cluster be
# the cluster to which the majority of its segments belonged
# Below zs_init is the assigned discrete states of each time point for a trial. zs_init2
# tracks the clusters of each time point across all the segments it's part of
zs = []
for tr in range(num_trials):
xhat = datas[tr]
T = xhat.shape[0]
n_steps = int((T - t_win) / t_gap) + 1
t_st = 0
zs_init = np.zeros(T)
zs_init2 = np.zeros([T, K]) # For each time point, tracks how many segments it's
# part of belong to each cluster
for k in range(n_steps):
t_end = t_st + t_win
t_idx = np.arange(t_st, t_end)
if t_gap == t_win:
zs_init[t_idx] = clustering.labels_[k]
else:
zs_init2[t_idx, clustering.labels_[k]] += 1
t_st = t_st + t_gap
if t_gap != t_win:
max_els = zs_init2.max(axis=1)
for t in range(T):
if np.sum(zs_init2[t] == max_els[t]) == 1:
# if there's a single best cluster, assign it
zs_init[t] = np.where(zs_init2[t] == max_els[t])[0]
else:
# multiple best clusters
if zs_init[t - 1] in np.where(zs_init2[t] == max_els[t])[0]:
# use best cluster from previous time point if it's in the running
zs_init[t] = zs_init[t - 1]
else:
# just use first element
zs_init[t] = np.where(zs_init2[t] == max_els[t])[0][0]
# I think this offset is correct rather than just using zs_init, but it should be
# double checked.
zs.append(np.concatenate([[0], zs_init[:-1]]))
zs = np.concatenate(zs, axis=0)
# split
zs = np.split(zs, np.cumsum(Ts)[:-1])
elif init_type == 'arhmm':
D_ = 4
if datas[0].shape[1] > D_:
# perform pca
pca = PCA(D_)
xs = pca.fit_transform(np.vstack(datas))
xs = np.split(xs, np.cumsum(Ts)[:-1])
else:
# keep original data
import copy
D_ = D
xs = copy.deepcopy(datas)
model_init = HMM(
K=2, D=D_, M=0, transitions='standard', observations='ar',
            observation_kwargs={'lags': 1})
init_model('pca-me', model_init, xs)
model_init.fit(
xs, inputs=None, method='em', num_iters=100, tolerance=1e-2,
initialize=False, transitions_mstep_kwargs={'optimizer': 'lbfgs', 'tol': 1e-3})
# make still state 0th state
mses = [np.mean(np.square(model_init.observations.As[i] - np.eye(D_))) for i in range(2)]
if mses[1] < mses[0]:
# permute states
model_init.permute([1, 0])
moving_state = 1
inputs_tr = [None] * len(datas)
zs = [model_init.most_likely_states(x, u) for x, u in zip(xs, inputs_tr)]
zs = np.concatenate(zs, axis=0)
# cluster moving data
km = KMeans(K - 1)
if np.sum(zs == moving_state) > K - 1:
km.fit(np.vstack(datas)[zs == moving_state])
zs[zs == moving_state] = km.labels_ + 1
# split
zs = np.split(zs, np.cumsum(Ts)[:-1])
elif init_type == 'pca-me':
# pca on motion energy
datas_filt = np.copy(datas)
for dtmp in datas_filt:
for i in range(dtmp.shape[1]):
dtmp[:, i] = savgol_filter(dtmp[:, i], 5, 2)
pca = PCA(1)
me = np.square(np.diff(np.vstack(datas_filt), axis=0))
xs = pca.fit_transform(np.concatenate([np.zeros((1, D)), me], axis=0))[:, 0]
xs = xs / np.max(xs)
# threshold data to get moving/non-moving
thresh = 0.01
zs = np.copy(xs)
zs[xs < thresh] = 0
zs[xs >= thresh] = 1
# cluster moving data
km = KMeans(K - 1)
km.fit(np.vstack(datas)[zs == 1])
zs[zs == 1] = km.labels_ + 1
# split
zs = np.split(zs, np.cumsum(Ts)[:-1])
else:
raise NotImplementedError('Invalid "init_type" of "%s"' % init_type)
# ------------------------
# estimate dynamics params
# ------------------------
if init_type != 'em-exact':
Ezs = [one_hot(z, K) for z in zs]
expectations = [(Ez, None, None) for Ez in Ezs]
if str(model.observations.__class__).find('Hierarchical') > -1:
obs = model.observations
# initialize parameters for global ar model
obs.global_ar_model.m_step(expectations, datas, inputs, masks, tags)
# update prior
obs._update_hierarchical_prior()
# Copy global parameters to per-group models
for ar in obs.per_group_ar_models:
ar.As = obs.global_ar_model.As.copy()
ar.Vs = obs.global_ar_model.Vs.copy()
ar.bs = obs.global_ar_model.bs.copy()
ar.Sigmas = obs.global_ar_model.Sigmas.copy()
ar.As = norm.rvs(obs.global_ar_model.As, np.sqrt(obs.cond_variance_A))
ar.Vs = norm.rvs(obs.global_ar_model.Vs, np.sqrt(obs.cond_variance_V))
ar.bs = norm.rvs(obs.global_ar_model.bs, np.sqrt(obs.cond_variance_b))
ar.Sigmas = obs.global_ar_model.Sigmas.copy()
else:
model.observations.m_step(expectations, datas, inputs, masks, tags)
return None
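# Example (sketch, assuming `datas` is a list of (T, D) arrays): build a 2-state ARHMM and
# initialize its discrete states with kmeans on differenced data before fitting.
#     arhmm = HMM(K=2, D=datas[0].shape[1], M=0, transitions='standard',
#                 observations='ar', observation_kwargs={'lags': 1})
#     init_model('kmeans-diff', arhmm, datas)
#     lps = arhmm.fit(datas, method='em', num_iters=50, initialize=False)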
def compute_dist_mat(datas, t_win, t_gap):
def sse(x, y):
return np.sum(np.square(x - y))
from sklearn.linear_model import Ridge
Ts = [data.shape[0] for data in datas]
num_trials = len(datas)
# Elements of segs contain triplets of
# 1) trial
# 2) time point of beginning of segment
# 3) time point of end of segment
segs = []
# Get all segments based on predefined t_win and t_gap
for tr in range(num_trials):
T = Ts[tr]
n_steps = int((T - t_win) / t_gap) + 1
for k in range(n_steps):
segs.append([tr, k * t_gap, k * t_gap + t_win])
# Fit a regression (solve for the dynamics matrix) within each segment
num_segs = len(segs)
sse_mat = np.zeros([num_segs, num_segs])
for j, seg in enumerate(segs):
[tr, t_st, t_end] = seg
X = datas[tr][t_st:t_end + 1, :]
rr = Ridge(alpha=1, fit_intercept=True)
rr.fit(X[:-1], X[1:] - X[:-1])
# Then see how well the dynamics from segment J works at making predictions on
# segment K (determined via sum squared error of predictions)
for k, seg2 in enumerate(segs):
[tr, t_st, t_end] = seg2
X = datas[tr][t_st:t_end + 1, :]
sse_mat[j, k] = sse(X[1:] - X[:-1], rr.predict(X[:-1]))
# Make "sse_mat" into a proper, symmetric distance matrix for clustering
tmp = sse_mat - np.diag(sse_mat)
dist_mat = tmp + tmp.T
return dist_mat
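# Example (sketch): precompute the segment-to-segment distance matrix once and reuse it for
# the 'ar-clust' initialization above (which otherwise recomputes it internally).
#     dist_mat = compute_dist_mat(datas, t_win=5, t_gap=5)
#     init_model('ar-clust', model, datas, dist_mat=dist_mat)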
# -------------------------------------------------------------------------------------------------
# model evaluation functions
# -------------------------------------------------------------------------------------------------
def extract_state_runs(states, indxs, min_length=20):
"""
Find contiguous chunks of data with the same state
Args:
states (list):
indxs (list):
min_length (int):
Returns:
list
"""
K = len(np.unique(np.concatenate([np.unique(s) for s in states])))
state_snippets = [[] for _ in range(K)]
for curr_states, curr_indxs in zip(states, indxs):
i_beg = 0
curr_state = curr_states[i_beg]
curr_len = 1
for i in range(1, len(curr_states)):
next_state = curr_states[i]
if next_state != curr_state:
# record indices if state duration long enough
if curr_len >= min_length:
state_snippets[curr_state].append(
curr_indxs[i_beg:i])
i_beg = i
curr_state = next_state
curr_len = 1
else:
curr_len += 1
        # end of trial cleanup
        if next_state == curr_state:
            # record indices if state duration long enough
            # (note: the `i_beg:i` slice drops the final timepoint of the trial)
            if curr_len >= min_length:
                state_snippets[curr_state].append(curr_indxs[i_beg:i])
return state_snippets
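# Example (sketch): pull out runs of each state lasting at least 5 time bins from
# most-likely state sequences; `model` and `datas` are assumed to exist.
#     states = [model.most_likely_states(x) for x in datas]
#     indxs = [np.arange(x.shape[0]) for x in datas]
#     snippets = extract_state_runs(states, indxs, min_length=5)
#     # snippets[k] is a list of index arrays, one per contiguous run of state k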
def viterbi_ll(model, datas):
"""Calculate log-likelihood of viterbi path."""
inputs = [None] * len(datas)
masks = [None] * len(datas)
tags = [None] * len(datas)
states = [model.most_likely_states(x, u) for x, u in zip(datas, inputs)]
ll = 0
for data, input, mask, tag, state in zip(datas, inputs, masks, tags, states):
if input is None:
input = np.zeros_like(data)
if mask is None:
mask = np.ones_like(data, dtype=bool)
likelihoods = model.observations.log_likelihoods(data, input, mask, tag)
ll += np.sum(likelihoods[(np.arange(state.shape[0]), state)])
return ll
def k_step_ll(model, datas, k_max):
"""Determine the k-step ahead ll."""
M = (model.M,) if isinstance(model.M, int) else model.M
L = model.observations.lags # AR lags
k_step_lls = 0
for data in datas:
input = np.zeros((data.shape[0],) + M)
mask = np.ones_like(data, dtype=bool)
pi0 = model.init_state_distn.initial_state_distn
Ps = model.transitions.transition_matrices(data, input, mask, tag=None)
lls = model.observations.log_likelihoods(data, input, mask, tag=None)
T, K = lls.shape
# Forward pass gets the predicted state at time t given
# observations up to and including those from time t
alphas = np.zeros((T, K))
forward_pass(pi0, Ps, lls, alphas)
# pz_tt = p(z_{t}, x_{1:t}) = alpha(z_t) / p(x_{1:t})
pz_tt = np.exp(alphas - logsumexp(alphas, axis=1, keepdims=True))
log_likes_list = []
for k in range(k_max + 1):
if k == 0:
# p(x_t | x_{1:T}) = \sum_{z_t} p(x_t | z_t) p(z_t | x_{1:t})
pz_tpkt = np.copy(pz_tt)
assert np.allclose(np.sum(pz_tpkt, axis=1), 1.0)
log_likes_0 = logsumexp(lls[k_max:] + np.log(pz_tpkt[k_max:]), axis=1)
# pred_data = get_predicted_obs(model, data, pz_tpkt)
else:
if k == 1:
# p(z_{t+1} | x_{1:t}) =
# \sum_{z_t} p(z_{t+1} | z_t) alpha(z_t) / p(x_{1:t})
pz_tpkt = np.copy(pz_tt)
# p(z_{t+k} | x_{1:t}) =
# \sum_{z_{t+k-1}} p(z_{t+k} | z_{t+k-1}) p(z_{z+k-1} | x_{1:t})
if Ps.shape[0] == 1: # stationary transition matrix
pz_tpkt = np.matmul(pz_tpkt[:-1, None, :], Ps)[:, 0, :]
else: # dynamic transition matrix
pz_tpkt = np.matmul(pz_tpkt[:-1, None, :], Ps[k - 1:])[:, 0, :]
assert np.allclose(np.sum(pz_tpkt, axis=1), 1.0)
# p(x_{t+k} | x_{1:t}) =
# \sum_{z_{t+k}} p(x_{t+k} | z_{t+k}) p(z_{t+k} | x_{1:t})
log_likes = logsumexp(lls[k:] + np.log(pz_tpkt), axis=1)
# compute summed ll only over timepoints that are valid for each value of k
log_likes_0 = log_likes[k_max - k:]
log_likes_list.append(np.sum(log_likes_0))
k_step_lls += np.array(log_likes_list)
return k_step_lls
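# Usage sketch: `k_step_ll(model, datas, k_max=5)` returns an array of length k_max + 1,
# where entry k is the log-likelihood summed over valid timepoints when predicting k steps
# ahead from the filtered state distribution.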
def k_step_r2(
model, datas, k_max, n_samp=10, obs_noise=True, disc_noise=True, return_type='total_r2'):
"""Determine the k-step ahead r2.
Args:
model:
datas:
k_max:
n_samp:
obs_noise: bool
turn observation noise on/off
disc_noise: bool
turn discrete state sampling on/off
        return_type:
            'per_batch_r2' | 'total_r2'
            (note: 'bootstrap_r2' and 'per_batch_mse' are not currently implemented and
            will raise NotImplementedError)
    Returns:
        np.ndarray of k-step-ahead R^2 values
    """
N = len(datas)
L = model.observations.lags # AR lags
D = model.D
x_true_total = []
x_pred_total = [[] for _ in range(k_max)]
if return_type == 'per_batch_r2':
k_step_r2s = np.zeros((N, k_max, n_samp))
elif return_type == 'total_r2':
k_step_r2s = np.zeros((k_max, n_samp))
else:
raise NotImplementedError('"%s" is not a valid return type' % return_type)
for d, data in enumerate(datas):
# print('%i/%i' % (d + 1, len(datas)))
T = data.shape[0]
x_true_all = data[L + k_max - 1: T + 1]
x_pred_all = np.zeros((n_samp, (T - 1), D, k_max))
if not disc_noise:
zs = model.most_likely_states(data)
inputs = np.zeros((T,) + (model.observations.M,))
# collect sampled data
for t in range(L - 1, T):
# find the most likely discrete state at time t based on its past
if disc_noise:
data_t = data[:t + 1]
zs = model.most_likely_states(data_t)[-L:]
else:
pass
# sample forward in time n_samp times
for n in range(n_samp):
# sample forward in time k_max steps
if disc_noise:
_, x_pred = model.sample(
k_max, prefix=(zs, data_t[-L:]), with_noise=obs_noise)
else:
pad = L
x_pred = np.concatenate((data[t - L + 1:t + 1], np.zeros((k_max, D))))
for k in range(pad, pad + k_max):
if t + 1 + k - pad < T:
x_pred[k, :] = model.observations.sample_x(
zs[t + 1 + k - pad], x_pred[:k], input=inputs[t], tag=None,
with_noise=obs_noise)
else:
# beyond the end of the data sample; return zeros
pass
x_pred = x_pred[pad:]
# predicted x values in the forward prediction time
x_pred_all[n, t - L + 1, :, :] = np.transpose(x_pred)[None, None, :, :]
# store predicted data
x_true_total.append(x_true_all)
for k in range(k_max):
idxs = (k_max - k - 1, k_max - k - 1 + x_true_all.shape[0])
x_pred_total[k].append(x_pred_all[:, slice(*idxs), :, k])
# compute r2s
if return_type == 'per_batch_r2':
for d in range(len(datas)):
for k in range(k_max):
for n in range(n_samp):
k_step_r2s[d, k, n] = r2_score(
x_true_total[d], x_pred_total[k][d][n])
elif return_type == 'total_r2':
for k in range(k_max):
for n in range(n_samp):
k_step_r2s[k, n] = r2_score(
np.vstack(x_true_total),
np.vstack([x_pred_total[k][d][n] for d in range(len(datas))]))
return k_step_r2s
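# Usage sketch: `k_step_r2(model, datas, k_max=5, n_samp=10, return_type='total_r2')`
# returns an array of shape (k_max, n_samp); with return_type='per_batch_r2' the shape is
# (len(datas), k_max, n_samp).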
# -------------------------------------------------------------------------------------------------
# path handling functions
# -------------------------------------------------------------------------------------------------
def get_model_name(n_states, model_kwargs):
trans = model_kwargs['transitions']
obs = model_kwargs['observations']
if obs.find('ar') > -1:
lags = model_kwargs['observation_kwargs']['lags']
else:
lags = 0
if trans == 'sticky':
kappa = model_kwargs['transition_kwargs']['kappa']
else:
kappa = ''
model_name = str(
'obs=%s_trans=%s_lags=%i_K=%02i' % (obs, trans, lags, n_states))
if trans == 'sticky':
model_name = str('%s_kappa=%1.0e' % (model_name, kappa))
return model_name
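# Example (derivable from the formatting above):
#     get_model_name(4, {'transitions': 'sticky', 'transition_kwargs': {'kappa': 1e4},
#                        'observations': 'ar', 'observation_kwargs': {'lags': 2}})
#     # -> 'obs=ar_trans=sticky_lags=2_K=04_kappa=1e+04'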
def plot_latents_states(
latents=None, states=None, state_probs=None, slc=(0, 1000), m=20):
"""
states | state probs | x coords | y coords
Args:
latents (dict): keys are 'x', 'y', 'l', each value is a TxD np array
states (np array): length T
state_probs (np array): T x K
"""
n_dlc_comp = latents.shape[1]
if state_probs is not None:
fig, axes = plt.subplots(
3, 1, figsize=(12, 10),
gridspec_kw={'height_ratios': [0.1, 0.1, 0.4]})
else:
fig, axes = plt.subplots(
2, 1, figsize=(10, 10),
gridspec_kw={'height_ratios': [0.1, 0.4]})
i = 0
axes[i].imshow(states[None, slice(*slc)], aspect='auto', cmap='tab20b')
axes[i].set_xticks([])
axes[i].set_yticks([])
axes[i].set_title('State')
# if state_probs is not None:
# i += 1
# n_states = state_probs.shape[1]
# xs_ = [np.arange(slc[0], slc[1]) for _ in range(n_states)]
# ys_ = [state_probs[slice(*slc), j] for j in range(n_states)]
# cs_ = [j for j in range(n_states)]
# _multiline(xs_, ys_, ax=axes[i], c=cs_, alpha=0.8, cmap='tab20b', lw=3)
# axes[i].set_xticks([])
# axes[i].set_xlim(slc[0], slc[1])
# axes[i].set_yticks([])
# axes[i].set_ylim(-0.1, 1.1)
# axes[i].set_title('State probabilities')
i += 1
behavior = m * latents / np.max(np.abs(latents)) + \
np.arange(latents.shape[1])
axes[i].plot(np.arange(slc[0], slc[1]), behavior[slice(*slc), :])
axes[i].set_xticks([])
axes[i].set_xlim(slc[0], slc[1])
axes[i].set_yticks([])
axes[i].set_ylim(0, n_dlc_comp + 1)
axes[-1].set_xlabel('Time (bins)')
plt.tight_layout()
plt.show()
return fig
```
#### File: search/nogamma/plot_search_nogamma.py
```python
import numpy as np
import os
from sklearn.cluster import KMeans
from behavenet.plotting.cond_ae_utils import (
plot_psvae_training_curves,
plot_label_reconstructions,
plot_hyperparameter_search_results,
make_latent_traversal_movie
)
from nogamma.hyperparameter_search_nogamma import (
hyperparameter_search
)
from nogamma.psvae_experiment_nogamma import PSvaeExperiment
from nogamma.search_utils_nogamma import (
load_latents_trials_frames,
list_hparams,
get_version_dir,
get_expt_dir_wrapper
)
def _cluster(latents, n_clusters=5):
# np.random.seed(0) # to reproduce clusters
kmeans = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)
distances = kmeans.fit_transform(latents)
clust_id = kmeans.predict(latents)
return distances, clust_id
def plot_psvae_training_curves_wrapper(lab, expt, animal, session, expt_name, n_ae_latents, rng_seeds_model, n_labels,
which_as_array='alphas', alpha=None, beta=None, save_file=None, **kwargs):
alphas, betas = list_hparams(lab, expt, animal, session, expt_name, n_ae_latents)
if save_file is None:
# save in expt_dir
save_dir = get_expt_dir_wrapper(lab, expt, animal, session, expt_name, n_ae_latents)
if which_as_array == 'alphas':
file_name = 'psvae_training_beta-{}'.format(beta)
else:
file_name = 'psvae_training_alpha-{}'.format(alpha)
save_file = os.path.join(save_dir, file_name)
print("Saving PS-VAE training graphs to {}".format(save_file))
n_ae_latents = [n_ae_latents - n_labels]
if alpha is None:
alpha = alphas[0]
if beta is None:
beta = betas[0]
if which_as_array == 'alphas':
try:
betas = [beta]
rng_seeds_model = rng_seeds_model[:1]
n_ae_latents = n_ae_latents[:1]
except TypeError:
pass
if which_as_array == 'betas':
try:
alphas = [alpha]
rng_seeds_model = rng_seeds_model[:1]
n_ae_latents = n_ae_latents[:1]
except TypeError:
pass
if which_as_array == 'rng_seeds_model':
try:
betas = [beta]
alphas = [alpha]
n_ae_latents = n_ae_latents[:1]
except TypeError:
pass
if which_as_array == 'n_ae_latents':
try:
betas = [beta]
rng_seeds_model = rng_seeds_model[:1]
alphas = [alpha]
except TypeError:
pass
plot_psvae_training_curves(lab, expt, animal, session, alphas, betas, n_ae_latents, rng_seeds_model,
expt_name, n_labels, save_file=save_file, **kwargs)
def plot_label_reconstructions_wrapper(lab, expt, animal, session, n_ae_latents, expt_name, n_labels, trials,
alpha, beta, save_file=None, **kwargs):
if save_file is None:
# save in each version_dir
save_dir = get_version_dir(lab, expt, animal, session, expt_name, n_ae_latents, alpha, beta)
file_name = 'label_reconstruction_alpha-{}_beta-{}'.format(alpha, beta)
save_file = os.path.join(save_dir, file_name)
print("Saving label reconstruction graphs to {}".format(save_file))
n_ae_latents -= n_labels
plot_label_reconstructions(lab, expt, animal, session, n_ae_latents, expt_name,
n_labels, trials, alpha=alpha, beta=beta, save_file=save_file, **kwargs)
def make_latent_traversal_movie_wrapper(lab, expt, animal, session, label_names, expt_name, n_ae_latents, alpha, beta,
model_class='ps-vae', n_clusters=10, rng_seed_model=0, save_file=None,
**kwargs):
if save_file is None:
# save in each version_dir
save_dir = get_version_dir(lab, expt, animal, session, expt_name, n_ae_latents, alpha, beta)
file_name = 'latent_movie_alpha-{}_beta-{}'.format(alpha, beta)
save_file = os.path.join(save_dir, file_name)
print("Saving latent traversal movie to {}".format(save_file))
movie_expt = PSvaeExperiment(lab, expt, animal, session, label_names, expt_name, n_ae_latents, alpha, beta,
model_class=model_class, **kwargs)
# need to test model_ae in load_latents_trials_frames
latents, trials, frames = load_latents_trials_frames(movie_expt.hparams, movie_expt.version)
traversal_trials = []
batch_idxs = []
distances, _ = _cluster(latents, n_clusters)
for clust in range(n_clusters):
frame_idx = np.argmin(distances[:, clust])
traversal_trials.append(trials[frame_idx])
batch_idxs.append(frames[frame_idx])
trial_idxs = [None] * len(traversal_trials)
make_latent_traversal_movie(lab, expt, animal, session, model_class, alpha, beta,
n_ae_latents - len(label_names), rng_seed_model, expt_name, len(label_names),
trial_idxs, batch_idxs, traversal_trials, save_file=save_file, **kwargs)
def plot_hyperparameter_search_results_wrapper(lab, expt, animal, session, n_labels, label_names, n_ae_latents,
expt_name, alpha, beta, save_file=None, beta_n_ae_latents=None,
beta_expt_name=None, batch_size=None, format='pdf', **kwargs):
if save_file is None:
# save in expt_dir
save_dir = get_expt_dir_wrapper(lab, expt, animal, session, expt_name, n_ae_latents)
file_name = 'hparam_search_results'
save_file = os.path.join(save_dir, file_name)
if beta_n_ae_latents is None:
beta_n_ae_latents = n_ae_latents - n_labels
if beta_expt_name is None:
beta_expt_name = expt_name
alpha_weights, beta_weights = list_hparams(lab, expt, animal, session, expt_name, n_ae_latents)
alpha_n_ae_latents = [n_ae_latents - n_labels]
plot_hyperparameter_search_results(lab, expt, animal, session, n_labels, label_names, alpha_weights,
alpha_n_ae_latents, expt_name, beta_weights,
beta_n_ae_latents, beta_expt_name, alpha, beta, save_file,
batch_size=batch_size, format=format, **kwargs)
def plot_and_film_best(lab, expt, animal, session, label_names, expt_name, n_ae_latents, trials, beta_start=1,
model_class='ps-vae', n_clusters=5, rng_seed_model=0, rng_seeds_model=None, save_file=None,
**kwargs):
if rng_seeds_model is None:
rng_seeds_model = [rng_seed_model]
alphas, betas = list_hparams(lab, expt, animal, session, expt_name, n_ae_latents)
print(alphas, betas)
alpha, beta = hyperparameter_search(lab, expt, animal, session, label_names, expt_name, n_ae_latents,
alphas, betas, beta=beta_start, **kwargs)
print("Using alpha: {} and beta: {} from hyperparameter search".format(alpha, beta))
# psvae training curves, plot across alphas for default beta and betas for best alpha, save in expt_dir
plot_psvae_training_curves_wrapper(lab, expt, animal, session, expt_name, n_ae_latents, rng_seeds_model,
len(label_names), which_as_array='alphas', beta=10, **kwargs)
plot_psvae_training_curves_wrapper(lab, expt, animal, session, expt_name, n_ae_latents, rng_seeds_model,
len(label_names), which_as_array='betas', alpha=alpha, **kwargs)
# save hparam plots in expt_name directory, one for each alpha with beta=1
plot_hyperparameter_search_results_wrapper(lab, expt, animal, session, len(label_names), label_names,
n_ae_latents, expt_name, alpha, beta, **kwargs)
# make label reconstruction graphs for versions
for alpha_ in alphas:
for beta_ in betas:
print("Plotting label reconstructions for alpha: {} and beta: {}".format(alpha_, beta_))
plot_label_reconstructions_wrapper(lab, expt, animal, session, n_ae_latents, expt_name, len(label_names), trials,
alpha_, beta_, **kwargs)
# make latent traversal movies for versions
for alpha_ in alphas:
for beta_ in betas:
print("Making latent traversal movie for alpha: {} and beta: {}".format(alpha_, beta_))
make_latent_traversal_movie_wrapper(lab, expt, animal, session, label_names, expt_name, n_ae_latents,
alpha_, beta_, model_class=model_class,
n_clusters=n_clusters, rng_seed_model=rng_seed_model, **kwargs)
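# Usage sketch (all identifiers below are placeholders for a behavenet experiment):
#     plot_and_film_best(
#         'mylab', 'myexpt', 'mouse01', 'session-00',
#         label_names=['paw_l', 'paw_r'], expt_name='grid_search',
#         n_ae_latents=8, trials=[10, 11], n_clusters=5)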
```
#### File: tests/test_models/test_ae_model_architecture_generator.py
```python
import os
import pytest
import numpy as np
import behavenet.models.ae_model_architecture_generator as utils
def test_draw_archs():
n_archs = 3
n_ae_latents = 6
# no check memory
archs = utils.draw_archs(
batch_size=100, input_dim=[2, 32, 32], n_ae_latents=n_ae_latents, n_archs=n_archs,
check_memory=False, mem_limit_gb=None)
assert len(archs) == n_archs
for arch1 in archs:
assert arch1['n_ae_latents'] == n_ae_latents
matching = 0
for arch2 in archs:
if arch1 == arch2:
matching += 1
assert matching == 1
# check memory
mem_limit_gb = 1
archs = utils.draw_archs(
batch_size=100, input_dim=[2, 32, 32], n_ae_latents=n_ae_latents, n_archs=n_archs,
check_memory=True, mem_limit_gb=mem_limit_gb)
assert len(archs) == n_archs
for arch1 in archs:
assert arch1['n_ae_latents'] == n_ae_latents
assert arch1['mem_size_gb'] < mem_limit_gb
matching = 0
for arch2 in archs:
if arch1 == arch2:
matching += 1
assert matching == 1
def test_get_possible_arch():
input_dim = [2, 32, 32]
arch_seed = 0
# proper functioning
n_ae_latents = 6
arch = utils.get_possible_arch(input_dim, n_ae_latents, arch_seed)
assert arch['n_ae_latents'] == n_ae_latents
# raise exception if too many latents (max = 64)
n_ae_latents = 65
with pytest.raises(ValueError):
utils.get_possible_arch(input_dim, n_ae_latents, arch_seed)
def test_get_encoding_block_conv():
input_dim = [2, 32, 32]
n_ae_latents = 6
# possible options for the architecture
opts = {}
opts['possible_kernel_sizes'] = np.asarray([3, 5])
opts['possible_strides'] = np.asarray([1, 2])
opts['possible_strides_probs'] = np.asarray([0.1, 0.9])
opts['possible_max_pool_sizes'] = np.asarray([2])
opts['possible_n_channels'] = np.asarray([16, 32, 64, 128])
opts['prob_stopping'] = np.arange(0, 1, .05)
opts['max_latents'] = 64
arch = {}
arch['ae_input_dim'] = input_dim
arch['model_type'] = 'conv'
arch['n_ae_latents'] = n_ae_latents
arch['ae_decoding_last_FF_layer'] = 0
arch['ae_network_type'] = 'strides_only'
arch['ae_padding_type'] = 'valid'
# using correct options (all convolutional)
np.random.seed(4)
arch = utils.get_encoding_conv_block(arch, opts)
for i in range(len(arch['ae_encoding_n_channels'])):
assert arch['ae_encoding_layer_type'][i] in ['conv']
assert arch['ae_encoding_n_channels'][i] in opts['possible_n_channels']
assert arch['ae_encoding_kernel_size'][i] in opts['possible_kernel_sizes']
assert arch['ae_encoding_stride_size'][i] in opts['possible_strides']
    # using correct options (with maxpool)
np.random.seed(6)
arch['ae_network_type'] = 'max_pooling'
arch = utils.get_encoding_conv_block(arch, opts)
for i in range(len(arch['ae_encoding_n_channels'])):
assert arch['ae_encoding_layer_type'][i] in ['conv', 'maxpool']
assert arch['ae_encoding_n_channels'][i] in opts['possible_n_channels']
if arch['ae_encoding_layer_type'][i] == 'conv':
assert arch['ae_encoding_kernel_size'][i] in opts['possible_kernel_sizes']
assert arch['ae_encoding_stride_size'][i] in opts['possible_strides']
else:
assert arch['ae_encoding_kernel_size'][i] in opts['possible_max_pool_sizes']
assert arch['ae_encoding_stride_size'][i] in opts['possible_max_pool_sizes']
def test_get_decoding_conv_block():
input_dim = [2, 128, 128]
n_ae_latents = 6
# possible options for the architecture
opts = {}
opts['possible_kernel_sizes'] = np.asarray([3, 5])
opts['possible_strides'] = np.asarray([1, 2])
opts['possible_strides_probs'] = np.asarray([0.1, 0.9])
opts['possible_max_pool_sizes'] = np.asarray([2])
opts['possible_n_channels'] = np.asarray([16, 32, 64, 128])
opts['prob_stopping'] = np.arange(0, 1, .05)
opts['max_latents'] = 64
arch = {}
arch['ae_input_dim'] = input_dim
arch['model_type'] = 'conv'
arch['n_ae_latents'] = n_ae_latents
arch['ae_decoding_last_FF_layer'] = 0
arch['ae_network_type'] = 'strides_only'
arch['ae_padding_type'] = 'valid'
# using correct options (all convolutional)
np.random.seed(16)
arch = utils.get_encoding_conv_block(arch, opts)
arch = utils.get_decoding_conv_block(arch)
assert arch['ae_decoding_n_channels'][-1] == input_dim[0]
for i in range(len(arch['ae_decoding_n_channels']) - 1):
assert arch['ae_decoding_layer_type'][i] in ['convtranspose']
assert arch['ae_decoding_n_channels'][i] == arch['ae_encoding_n_channels'][-2 - i]
assert arch['ae_decoding_kernel_size'][i] == arch['ae_encoding_kernel_size'][-1 - i]
assert arch['ae_decoding_stride_size'][i] == arch['ae_encoding_stride_size'][-1 - i]
# using correct options (with maxpool)
np.random.seed(16)
arch['ae_network_type'] = 'max_pooling'
arch = utils.get_encoding_conv_block(arch, opts)
arch = utils.get_decoding_conv_block(arch)
print(arch)
for i in range(len(arch['ae_decoding_n_channels']) - 1):
assert arch['ae_decoding_layer_type'][i] in ['convtranspose', 'unpool']
assert arch['ae_decoding_n_channels'][i] == arch['ae_encoding_n_channels'][-2 - i]
assert arch['ae_decoding_kernel_size'][i] == arch['ae_encoding_kernel_size'][-1 - i]
assert arch['ae_decoding_stride_size'][i] == arch['ae_encoding_stride_size'][-1 - i]
# using correct options (with final ff layer)
arch['ae_decoding_last_FF_layer'] = True
arch = utils.get_decoding_conv_block(arch)
assert arch['ae_decoding_n_channels'][-1] == 16
def test_calculate_output_dim():
# try all even/odd combos for input_dim/kernel/stride
# ----------------------
# conv layers - same
# ----------------------
input_dim, kernel, stride = 16, 4, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'conv')
assert out == 6
assert before == 1
assert after == 2
input_dim, kernel, stride = 17, 4, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'conv')
assert out == 6
assert before == 1
assert after == 1
input_dim, kernel, stride = 16, 3, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'conv')
assert out == 6
assert before == 1
assert after == 1
input_dim, kernel, stride = 17, 3, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'conv')
assert out == 6
assert before == 0
assert after == 1
input_dim, kernel, stride = 16, 4, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'conv')
assert out == 8
assert before == 1
assert after == 1
input_dim, kernel, stride = 17, 4, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'conv')
assert out == 9
assert before == 1
assert after == 2
input_dim, kernel, stride = 16, 3, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'conv')
assert out == 8
assert before == 0
assert after == 1
input_dim, kernel, stride = 17, 3, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'conv')
assert out == 9
assert before == 1
assert after == 1
# ----------------------
# conv layers - valid
# ----------------------
input_dim, kernel, stride = 16, 4, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'conv')
assert out == 5
assert before == 0
assert after == 0
input_dim, kernel, stride = 17, 4, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'conv')
assert out == 5
assert before == 0
assert after == 0
input_dim, kernel, stride = 16, 3, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'conv')
assert out == 5
assert before == 0
assert after == 0
input_dim, kernel, stride = 17, 3, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'conv')
assert out == 5
assert before == 0
assert after == 0
input_dim, kernel, stride = 16, 4, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'conv')
assert out == 7
assert before == 0
assert after == 0
input_dim, kernel, stride = 17, 4, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'conv')
assert out == 7
assert before == 0
assert after == 0
input_dim, kernel, stride = 16, 3, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'conv')
assert out == 7
assert before == 0
assert after == 0
input_dim, kernel, stride = 17, 3, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'conv')
assert out == 8
assert before == 0
assert after == 0
# ----------------------
# conv layers - other
# ----------------------
with pytest.raises(NotImplementedError):
utils.calculate_output_dim(input_dim, kernel, stride, 'test', 'conv')
# ----------------------
# maxpool layers - kern
# ----------------------
with pytest.raises(NotImplementedError):
utils.calculate_output_dim(input_dim, 3, stride, 'test', 'conv')
# ----------------------
# maxpool layers - same
# ----------------------
input_dim, kernel, stride = 16, 2, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'maxpool')
assert out == 6
assert before == 0
assert after == 0
input_dim, kernel, stride = 17, 2, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'maxpool')
assert out == 6
assert before == 0
assert after == 0
input_dim, kernel, stride = 16, 2, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'maxpool')
assert out == 8
assert before == 0
assert after == 0
input_dim, kernel, stride = 17, 2, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'same', 'maxpool')
assert out == 9
assert before == 0
assert after == 0
# ----------------------
# maxpool layers - valid
# ----------------------
input_dim, kernel, stride = 16, 2, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'maxpool')
assert out == 5
assert before == 0
assert after == 0
input_dim, kernel, stride = 17, 2, 3
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'maxpool')
assert out == 6
assert before == 0
assert after == 0
input_dim, kernel, stride = 16, 2, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'maxpool')
assert out == 8
assert before == 0
assert after == 0
input_dim, kernel, stride = 17, 2, 2
out, before, after = utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'maxpool')
assert out == 8
assert before == 0
assert after == 0
# ----------------------
# maxpool layers - other
# ----------------------
with pytest.raises(NotImplementedError):
utils.calculate_output_dim(input_dim, kernel, stride, 'test', 'maxpool')
# ----------------------
# other layers
# ----------------------
with pytest.raises(NotImplementedError):
utils.calculate_output_dim(input_dim, kernel, stride, 'valid', 'test')
def test_estimate_model_footprint():
from behavenet.models.aes import AE
n_archs = 3
input_dim = [2, 128, 128]
n_ae_latents = 12
archs = utils.draw_archs(
batch_size=100, input_dim=input_dim, n_ae_latents=n_ae_latents, n_archs=n_archs,
check_memory=False, mem_limit_gb=20)
for arch in archs:
arch['model_class'] = 'ae'
arch['n_input_channels'] = input_dim[0]
arch['y_pixels'] = input_dim[1]
arch['x_pixels'] = input_dim[2]
model = AE(archs[0])
f0 = utils.estimate_model_footprint(model, tuple([100] + input_dim))
assert 290 < f0 / 1e6 < 310
model = AE(archs[1])
f1 = utils.estimate_model_footprint(model, tuple([100] + input_dim))
assert 600 < f1 / 1e6 < 650
model = AE(archs[2])
f2 = utils.estimate_model_footprint(model, tuple([100] + input_dim))
assert 1000 < f2 / 1e6 < 1200
# cutoff size
f2a = utils.estimate_model_footprint(model, tuple([100] + input_dim), 0.1)
assert 100 < f2a < f2
def test_get_handcrafted_dims():
# symmetric arch
arch0 = utils.load_default_arch()
arch0['ae_input_dim'] = [2, 128, 128]
arch0 = utils.get_handcrafted_dims(arch0, symmetric=True)
assert arch0['ae_encoding_x_dim'] == [64, 32, 16, 8, 2]
assert arch0['ae_encoding_y_dim'] == [64, 32, 16, 8, 2]
assert arch0['ae_encoding_x_padding'] == [(1, 2), (1, 2), (1, 2), (1, 2), (1, 1)]
assert arch0['ae_encoding_y_padding'] == [(1, 2), (1, 2), (1, 2), (1, 2), (1, 1)]
assert arch0['ae_decoding_x_dim'] == [8, 16, 32, 64, 128]
assert arch0['ae_decoding_y_dim'] == [8, 16, 32, 64, 128]
assert arch0['ae_decoding_x_padding'] == [(1, 1), (1, 2), (1, 2), (1, 2), (1, 2)]
assert arch0['ae_decoding_y_padding'] == [(1, 1), (1, 2), (1, 2), (1, 2), (1, 2)]
# asymmetric arch (TODO: source code not updated)
arch1 = utils.load_default_arch()
arch1['ae_input_dim'] = [2, 128, 128]
arch1['ae_decoding_n_channels'] = [64, 32, 32]
arch1['ae_decoding_kernel_size'] = [5, 5, 5]
arch1['ae_decoding_stride_size'] = [2, 2, 2]
arch1['ae_decoding_layer_type'] = ['conv', 'conv', 'conv']
arch1['ae_decoding_starting_dim'] = [1, 8, 8]
arch1 = utils.get_handcrafted_dims(arch1, symmetric=False)
assert arch1['ae_encoding_x_dim'] == [64, 32, 16, 8, 2]
assert arch1['ae_encoding_y_dim'] == [64, 32, 16, 8, 2]
assert arch1['ae_encoding_x_padding'] == [(1, 2), (1, 2), (1, 2), (1, 2), (1, 1)]
assert arch1['ae_encoding_y_padding'] == [(1, 2), (1, 2), (1, 2), (1, 2), (1, 1)]
assert arch1['ae_decoding_x_dim'] == [15, 29, 57]
assert arch1['ae_decoding_y_dim'] == [15, 29, 57]
assert arch1['ae_decoding_x_padding'] == [(2, 2), (2, 2), (2, 2)]
assert arch1['ae_decoding_y_padding'] == [(2, 2), (2, 2), (2, 2)]
# raise exception if asymmetric arch and max pooling
arch2 = utils.load_default_arch()
arch2['ae_input_dim'] = [2, 128, 128]
arch2['ae_network_type'] = 'max_pooling'
with pytest.raises(NotImplementedError):
utils.get_handcrafted_dims(arch2, symmetric=False)
def test_load_handcrafted_arch():
input_dim = [2, 128, 128]
n_ae_latents = 12
# use default arch
ae_arch_json = None
arch = utils.load_handcrafted_arch(input_dim, n_ae_latents, ae_arch_json, check_memory=False)
assert arch['n_input_channels'] == input_dim[0]
assert arch['y_pixels'] == input_dim[1]
assert arch['x_pixels'] == input_dim[2]
assert arch['ae_input_dim'] == input_dim
assert arch['n_ae_latents'] == n_ae_latents
assert arch['ae_encoding_n_channels'] == [32, 64, 128, 256, 512]
# load arch from json
ae_arch_json = os.path.join(
os.getcwd(), 'configs', 'ae_jsons', 'ae_arch_2.json')
arch = utils.load_handcrafted_arch(input_dim, n_ae_latents, ae_arch_json, check_memory=False)
assert arch['n_input_channels'] == input_dim[0]
assert arch['y_pixels'] == input_dim[1]
assert arch['x_pixels'] == input_dim[2]
assert arch['ae_input_dim'] == input_dim
assert arch['n_ae_latents'] == n_ae_latents
assert arch['ae_encoding_n_channels'] == [64, 64, 64, 64, 64]
# use default arch when json does not exist
ae_arch_json = os.path.join(
os.getcwd(), 'configs', 'ae_jsons', 'ae_arch_3.json')
arch = utils.load_handcrafted_arch(input_dim, n_ae_latents, ae_arch_json, check_memory=False)
assert arch['n_input_channels'] == input_dim[0]
assert arch['y_pixels'] == input_dim[1]
assert arch['x_pixels'] == input_dim[2]
assert arch['ae_input_dim'] == input_dim
assert arch['n_ae_latents'] == n_ae_latents
assert arch['ae_encoding_n_channels'] == [32, 64, 128, 256, 512]
# check memory runs
ae_arch_json = None
arch = utils.load_handcrafted_arch(
input_dim, n_ae_latents, ae_arch_json, check_memory=True, batch_size=10, mem_limit_gb=20)
assert arch['n_input_channels'] == input_dim[0]
assert arch['y_pixels'] == input_dim[1]
assert arch['x_pixels'] == input_dim[2]
assert arch['ae_input_dim'] == input_dim
assert arch['n_ae_latents'] == n_ae_latents
assert arch['ae_encoding_n_channels'] == [32, 64, 128, 256, 512]
# raise exception when not enough gpu memory
ae_arch_json = None
with pytest.raises(ValueError):
utils.load_handcrafted_arch(
input_dim, n_ae_latents, ae_arch_json,
check_memory=True, batch_size=10, mem_limit_gb=0.1)
def test_load_default_arch():
required_keys = [
'ae_network_type',
'ae_padding_type',
'ae_batch_norm',
'ae_batch_norm_momentum',
'symmetric_arch',
'ae_encoding_n_channels',
'ae_encoding_kernel_size',
'ae_encoding_stride_size',
'ae_encoding_layer_type',
'ae_decoding_last_FF_layer']
arch = utils.load_default_arch()
returned_keys = list(arch.keys())
for key in required_keys:
assert key in returned_keys
```
#### File: tests/test_plotting/test_arhmm_utils.py
```python
import numpy as np
from behavenet.plotting import arhmm_utils
def test_get_discrete_chunks():
states = [
np.array([0, 1, 1, 1, 2, 2, 0]),
np.array([3, 3, 3, 4, 4, 2, 2, 2])
]
chunks = arhmm_utils.get_discrete_chunks(states, include_edges=True)
assert np.all(chunks[0] == np.array([[0, 0, 1], [0, 6, 7]]))
assert np.all(chunks[1] == np.array([[0, 1, 4]]))
assert np.all(chunks[2] == np.array([[0, 4, 6], [1, 5, 8]]))
assert np.all(chunks[3] == np.array([[1, 0, 3]]))
assert np.all(chunks[4] == np.array([[1, 3, 5]]))
chunks = arhmm_utils.get_discrete_chunks(states, include_edges=False)
assert np.all(chunks[0] == np.array([]))
assert np.all(chunks[1] == np.array([[0, 1, 4]]))
assert np.all(chunks[2] == np.array([[0, 4, 6]]))
assert np.all(chunks[3] == np.array([]))
assert np.all(chunks[4] == np.array([[1, 3, 5]]))
def test_get_state_durations():
# construct mock HMM class that passes argument through function `most_likely_states`
class HMM(object):
@classmethod
def most_likely_states(cls, x):
return x
hmm = HMM()
hmm.K = 4
latents = [
np.array([0, 1, 1, 1, 2, 2, 0]),
np.array([3, 3, 3, 4, 4, 2, 2, 2]),
np.array([0, 0, 0, 3, 3, 3, 1, 1, 2])
]
durations = arhmm_utils.get_state_durations(latents, hmm, include_edges=True)
assert np.all(durations[0] == np.array([1, 1, 3]))
assert np.all(durations[1] == np.array([3, 2]))
assert np.all(durations[2] == np.array([2, 3, 1]))
assert np.all(durations[3] == np.array([3, 3]))
durations = arhmm_utils.get_state_durations(latents, hmm, include_edges=False)
assert np.all(durations[0] == np.array([]))
assert np.all(durations[1] == np.array([3, 2]))
assert np.all(durations[2] == np.array([2]))
assert np.all(durations[3] == np.array([3]))
hmm.K = 1
durations = arhmm_utils.get_state_durations(latents, hmm)
assert len(durations) == 0
``` |
{
"source": "john-m24/InferenceBenchmark",
"score": 2
} |
#### File: src/algos/complex_nn.py
```python
from algos.simba_algo import SimbaDefence
import torch.nn.functional as F
import torch
import torch.nn as nn
import numpy as np
from models.complex_models import Discriminator, RealToComplex, ComplexToReal, ResNetEncoderComplex, ResNetDecoderComplex
def get_encoder_output_size(encoder, dims):
x = torch.randn((1,)+dims)
with torch.no_grad():
out = encoder(x)
if type(out) == tuple:
out = out[0]
return list(out.size())[1:]
class ComplexNN(SimbaDefence):
def __init__(self, config, utils) -> None:
super(ComplexNN, self).__init__(utils)
self.initialize(config)
def initialize(self, config):
self.optimizer_idx = 0
self.encoder_model,self.decoder_model = self.init_client_model(config)
img_size = config["img_size"]
size = get_encoder_output_size(self.encoder_model, (3,img_size,img_size))
self.discriminator = Discriminator(size=size)
models = [self.encoder_model, self.decoder_model, self.discriminator]
self.put_on_gpus(models)
self.utils.register_model("encoder_model", self.encoder_model)
self.utils.register_model("discriminator_model", self.discriminator)
self.utils.register_model("decoder_model", self.decoder_model)
self.optim_encoder , self.optim_decoder , self.optim_discriminator = self.init_optim(config, self.encoder_model, self.decoder_model, self.discriminator)
self.real_to_complex = RealToComplex()
self.complex_to_real = ComplexToReal()
self.loss_fn = F.cross_entropy
self.alpha = config["alpha"]
self.k = config["k"]
self.loss_tag = "decoder_loss"
self.acc_tag = "decoder_acc"
tags = [self.loss_tag, self.acc_tag]
for tag in tags:
self.utils.logger.register_tag("train/" + tag)
self.utils.logger.register_tag("val/" + tag)
    def put_on_gpus(self, models):
        for model in models:
            # note: this relies on `model_on_gpus` moving each model in place; the local
            # reassignment below does not update the `models` list itself
            model = self.utils.model_on_gpus(model)
def init_client_model(self, config):
if config["model_name"] == "resnet20complex":
encoder_model = ResNetEncoderComplex(3)
decoder_model = ResNetDecoderComplex(3, config["logits"], "alpha")
else:
print("can't find complex client model")
exit()
return encoder_model,decoder_model
def init_optim(self, config, encoder, decoder, discriminator):
encoder_parameters = encoder.parameters()
decoder_parameters = decoder.parameters()
if config["optimizer"] == "adam":
optimizer_e = torch.optim.Adam(encoder_parameters,
lr=config["lr"],
)
optimizer_decoder = torch.optim.Adam(decoder_parameters)
optimizer_discriminator = torch.optim.Adam(
discriminator.parameters(),
lr=config["lr"],
)
        else:
            print("Unknown optimizer {}".format(config["optimizer"]))
            exit()
        return optimizer_e, optimizer_decoder, optimizer_discriminator
def train(self):
self.mode = "train"
self.encoder_model.train()
self.decoder_model.train()
def eval(self):
self.mode = "val"
self.encoder_model.eval()
self.decoder_model.eval()
def forward(self, items):
inp = items["x"]
# Pass through encoder
a = self.encoder_model(inp)
self.a = a
# Shuffle batch elements of a to create b
with torch.no_grad():
indices = np.random.permutation(a.size(0))
b = a[indices]
self.z, self.theta = self.real_to_complex(a,b)
# Get discriminator score expectation over k rotations
self.score_fake = 0
for k in range(self.k):
# Shuffle batch to get b
indices = np.random.permutation(a.size(0))
b = a[indices]
# Rotate a
x, _ = self.real_to_complex(a,b)
a_rotated = x[:,0]
# Get discriminator score
self.score_fake += self.discriminator(a_rotated)
self.score_fake /= self.k # Average score
z = self.z.detach()
z.requires_grad = True
return z
def infer(self, h, labels):
h.retain_grad()
y = self.complex_to_real(h,self.theta)
y.retain_grad()
self.preds = self.decoder_model(y)
self.acc = (self.preds.argmax(dim=1) == labels).sum().item() / self.preds.shape[0]
self.utils.logger.add_entry(self.mode + "/" + self.acc_tag, self.acc)
if self.optimizer_idx%2 == 0:
g_loss_adv = -torch.mean(self.score_fake)
g_loss_ce = self.loss_fn(self.preds,labels)
loss = g_loss_adv + g_loss_ce
self.optim_decoder.zero_grad()
loss.backward(retain_graph=True)
self.optim_decoder.step()
self.utils.logger.add_entry(self.mode + "/" + self.loss_tag, loss.item())
return h.grad
else:
for p in self.discriminator.parameters():
p.data.clamp_(-0.01, 0.01)
self.d_loss_adv = -torch.mean(self.discriminator(self.a)) + torch.mean(self.score_fake)
self.optim_discriminator.zero_grad()
self.d_loss_adv.backward()
self.optim_discriminator.step()
return None
def backward(self, items):
if self.optimizer_idx%2 == 0:
self.optim_encoder.zero_grad()
self.z.backward(items["server_grads"])
self.optim_encoder.step()
self.optimizer_idx += 1
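
# Note (added commentary, not part of the original module): infer() alternates two
# roles via optimizer_idx. On even steps it updates the decoder with a cross-entropy
# term plus the negated critic score and returns the gradient w.r.t. h to the caller;
# on odd steps it performs a WGAN-style critic update, clamping the discriminator
# weights to [-0.01, 0.01] before the step and returning None. backward() only steps
# the encoder on the even (decoder) iterations, using the gradients supplied by the
# server.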
``` |
{
"source": "johnmacnamararseg/pipelines",
"score": 2
} |
#### File: gcp_launcher/utils/test_json_util.py
```python
import json
import unittest
from unittest import mock
from google_cloud_pipeline_components.container.v1.gcp_launcher.utils import json_util
class JsonUtilTests(unittest.TestCase):
def test_recursive_remove_empty(self):
payload = '{"display_name": "train_deploy1630230", "description": "", "predict_schemata": {"instance_schema_uri": "", "parameters_schema_uri": "", "prediction_schema_uri": ""}, "container_spec": {"image_uri": "us-docker.pkg.dev/cloud-aiplatform/prediction/tf2-cpu.2-3:latest", "command": "", "args": "", "env": "", "ports": "", "predict_route": "", "health_route": ""}, "artifact_uri": "gs://managed-pipeline-test-bugbash/pipeline_root/yangpa/1630225419", "explanation_spec": {"parameters": {}, "metadata": {}}, "encryption_spec": {"kms_key_name":""}, "default_int": 0}'
payload_json = json.loads(payload, strict=False)
payload_json_after = json_util.recursive_remove_empty(payload_json)
self.assertEqual(
json.dumps(payload_json_after),
'{"display_name": "train_deploy1630230", "container_spec": {"image_uri": "us-docker.pkg.dev/cloud-aiplatform/prediction/tf2-cpu.2-3:latest"}, "artifact_uri": "gs://managed-pipeline-test-bugbash/pipeline_root/yangpa/1630225419"}'
)
payload = '["abc","def", ""]'
payload_json = json.loads(payload, strict=False)
payload_json_after = json_util.recursive_remove_empty(payload_json)
self.assertEqual(json.dumps(payload_json_after), '["abc", "def", ""]')
payload = '"abc"'
payload_json = json.loads(payload, strict=False)
payload_json_after = json_util.recursive_remove_empty(payload_json)
self.assertEqual(json.dumps(payload_json_after), '"abc"')
def test_dont_remove_array_with_zero(self):
payload = '{"explanation_spec":{"parameters":{"sampled_shapley_attribution":{"path_count":7}},"metadata":{"inputs":{"ps_calc_14":{"input_baselines":[0.0],"input_tensor_name":"","encoding":0,"modality":"","indices_tensor_name":"","dense_shape_tensor_name":"","index_feature_mapping":[],"encoded_tensor_name":"","encoded_baselines":[],"group_name":""}},"outputs":{"scores":{"display_name_mapping_key":"classes","output_tensor_name":""}},"feature_attributions_schema_uri":"gs://sample/gcs/path/feature_attributions.yaml"}}}'
payload_json = json.loads(payload, strict=False)
payload_json_after = json_util.recursive_remove_empty(payload_json)
self.assertEqual(
json.dumps(payload_json_after),
'{"explanation_spec": {"parameters": {"sampled_shapley_attribution": {"path_count": 7}}, "metadata": {"inputs": {"ps_calc_14": {"input_baselines": [0.0]}}, "outputs": {"scores": {"display_name_mapping_key": "classes"}}, "feature_attributions_schema_uri": "gs://sample/gcs/path/feature_attributions.yaml"}}}'
)
```
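These tests pin down the pruning behaviour quite precisely: empty strings, zeros, empty dicts and empty lists are dropped from dict payloads, while list elements are left alone (so `[0.0]` and a bare `""` inside a list survive). The actual `json_util` implementation is not reproduced here and may differ; a minimal sketch consistent with the cases above would be:
```python
def recursive_remove_empty(value):
    """Drop dict entries whose cleaned value is empty-ish ('', 0, {}, [], None).

    List elements are recursed into but never dropped, matching the
    "don't remove array with zero" behaviour exercised in the tests above.
    """
    if isinstance(value, dict):
        cleaned = {}
        for key, item in value.items():
            item = recursive_remove_empty(item)
            if item not in ('', 0, {}, [], None):
                cleaned[key] = item
        return cleaned
    if isinstance(value, list):
        return [recursive_remove_empty(item) for item in value]
    return value
```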
#### File: kfp/cli/experiment.py
```python
import click
from kfp import client
from kfp.cli import output
from kfp.cli.utils import parsing
from kfp_server_api.models.api_experiment import ApiExperiment
@click.group()
def experiment():
"""Manage experiment resources."""
pass
@experiment.command()
@click.option(
'-d',
'--description',
help=parsing.get_param_descr(client.Client.create_experiment,
'description'))
@click.argument('name')
@click.pass_context
def create(ctx: click.Context, description: str, name: str):
"""Create an experiment."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
experiment = client_obj.create_experiment(name, description=description)
output.print_output(
experiment,
output.ModelType.EXPERIMENT,
output_format,
)
@experiment.command()
@click.option(
'--page-token',
default='',
help=parsing.get_param_descr(client.Client.list_experiments, 'page_token'))
@click.option(
'-m',
'--max-size',
default=100,
help=parsing.get_param_descr(client.Client.list_experiments, 'page_size'))
@click.option(
'--sort-by',
default='created_at desc',
help=parsing.get_param_descr(client.Client.list_experiments, 'sort_by'))
@click.option(
'--filter',
help=parsing.get_param_descr(client.Client.list_experiments, 'filter'))
@click.pass_context
def list(ctx: click.Context, page_token: str, max_size: int, sort_by: str,
filter: str):
"""List experiments."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
response = client_obj.list_experiments(
page_token=page_token,
page_size=max_size,
sort_by=sort_by,
filter=filter)
output.print_output(
response.experiments or [],
output.ModelType.EXPERIMENT,
output_format,
)
@experiment.command()
@click.argument('experiment-id')
@click.pass_context
def get(ctx: click.Context, experiment_id: str):
"""Get information about an experiment."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
experiment = client_obj.get_experiment(experiment_id)
output.print_output(
experiment,
output.ModelType.EXPERIMENT,
output_format,
)
@experiment.command()
@click.argument('experiment-id')
@click.pass_context
def delete(ctx: click.Context, experiment_id: str):
"""Delete an experiment."""
confirmation = 'Caution. The RunDetails page could have an issue' \
' when it renders a run that has no experiment.' \
' Do you want to continue?'
if not click.confirm(confirmation):
return
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
client_obj.delete_experiment(experiment_id)
output.print_deleted_text('experiment', experiment_id, output_format)
either_option_required = 'Either --experiment-id or --experiment-name is required.'
@experiment.command()
@click.option(
'--experiment-id',
default=None,
help=parsing.get_param_descr(client.Client.archive_experiment,
'experiment_id') + ' ' + either_option_required
)
@click.option(
'--experiment-name',
default=None,
help='Name of the experiment.' + ' ' + either_option_required)
@click.pass_context
def archive(ctx: click.Context, experiment_id: str, experiment_name: str):
"""Archive an experiment."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
if (experiment_id is None) == (experiment_name is None):
raise ValueError(either_option_required)
if not experiment_id:
experiment = client_obj.get_experiment(experiment_name=experiment_name)
experiment_id = experiment.id
client_obj.archive_experiment(experiment_id=experiment_id)
if experiment_id:
experiment = client_obj.get_experiment(experiment_id=experiment_id)
else:
experiment = client_obj.get_experiment(experiment_name=experiment_name)
output.print_output(
experiment,
output.ModelType.EXPERIMENT,
output_format,
)
@experiment.command()
@click.option(
'--experiment-id',
default=None,
help=parsing.get_param_descr(client.Client.unarchive_experiment,
'experiment_id') + ' ' + either_option_required
)
@click.option(
'--experiment-name',
default=None,
help='Name of the experiment.' + ' ' + either_option_required)
@click.pass_context
def unarchive(ctx: click.Context, experiment_id: str, experiment_name: str):
"""Unarchive an experiment."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
if (experiment_id is None) == (experiment_name is None):
raise ValueError(either_option_required)
if not experiment_id:
experiment = client_obj.get_experiment(experiment_name=experiment_name)
experiment_id = experiment.id
client_obj.unarchive_experiment(experiment_id=experiment_id)
if experiment_id:
experiment = client_obj.get_experiment(experiment_id=experiment_id)
else:
experiment = client_obj.get_experiment(experiment_name=experiment_name)
output.print_output(
experiment,
output.ModelType.EXPERIMENT,
output_format,
)
```
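The `archive` and `unarchive` commands above repeat the same resolve-by-id-or-name logic. A small hypothetical helper (`resolve_experiment_id` is not part of the kfp CLI; it only restates the checks already visible above) could factor that pattern out:
```python
from typing import Optional

either_option_required = 'Either --experiment-id or --experiment-name is required.'


def resolve_experiment_id(client_obj, experiment_id: Optional[str],
                          experiment_name: Optional[str]) -> str:
    """Return an experiment id, given exactly one of id or name."""
    if (experiment_id is None) == (experiment_name is None):
        raise ValueError(either_option_required)
    if experiment_id is not None:
        return experiment_id
    return client_obj.get_experiment(experiment_name=experiment_name).id
```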
#### File: kfp/cli/pipeline.py
```python
from typing import Optional
import click
from kfp import client
from kfp.cli import output
from kfp.cli.utils import deprecated_alias_group
from kfp.cli.utils import parsing
@click.group(
cls=deprecated_alias_group.deprecated_alias_group_factory({
'upload': 'create',
'upload-version': 'create-version'
}))
def pipeline():
"""Manage pipeline resources."""
pass
@pipeline.command()
@click.option(
'-p',
'--pipeline-name',
help=parsing.get_param_descr(client.Client.upload_pipeline,
'pipeline_name'))
@click.option(
'-d',
'--description',
help=parsing.get_param_descr(client.Client.upload_pipeline, 'description'))
@click.argument('package-file')
@click.pass_context
def create(ctx: click.Context,
pipeline_name: str,
package_file: str,
description: str = None):
"""Upload a pipeline."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
pipeline = client_obj.upload_pipeline(package_file, pipeline_name,
description)
output.print_output(
pipeline,
output.ModelType.PIPELINE,
output_format,
)
either_option_required = 'Either --pipeline-id or --pipeline-name is required.'
@pipeline.command()
@click.argument('package-file', type=click.Path(exists=True, dir_okay=False))
@click.option(
'-v',
'--pipeline-version',
help=parsing.get_param_descr(client.Client.upload_pipeline_version,
'pipeline_version_name'),
required=True,
)
@click.option(
'-p',
'--pipeline-id',
required=False,
help=parsing.get_param_descr(client.Client.upload_pipeline_version,
'pipeline_id') + ' ' + either_option_required)
@click.option(
'-n',
'--pipeline-name',
required=False,
help=parsing.get_param_descr(client.Client.upload_pipeline_version,
'pipeline_name') + ' ' + either_option_required
)
@click.option(
'-d',
'--description',
help=parsing.get_param_descr(client.Client.upload_pipeline_version,
'description'))
@click.pass_context
def create_version(ctx: click.Context,
package_file: str,
pipeline_version: str,
pipeline_id: Optional[str] = None,
pipeline_name: Optional[str] = None,
description: Optional[str] = None):
"""Upload a version of a pipeline."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
if bool(pipeline_id) == bool(pipeline_name):
raise ValueError(either_option_required)
if pipeline_name is not None:
pipeline_id = client_obj.get_pipeline_id(name=pipeline_name)
if pipeline_id is None:
raise ValueError(
f"Can't find a pipeline with name: {pipeline_name}")
version = client_obj.upload_pipeline_version(
pipeline_package_path=package_file,
pipeline_version_name=pipeline_version,
pipeline_id=pipeline_id,
pipeline_name=pipeline_name,
description=description)
output.print_output(
version,
output.ModelType.PIPELINE,
output_format,
)
@pipeline.command()
@click.option(
'--page-token',
default='',
help=parsing.get_param_descr(client.Client.list_pipelines, 'page_token'))
@click.option(
'-m',
'--max-size',
default=100,
help=parsing.get_param_descr(client.Client.list_pipelines, 'page_size'))
@click.option(
'--sort-by',
default='created_at desc',
help=parsing.get_param_descr(client.Client.list_pipelines, 'sort_by'))
@click.option(
'--filter',
help=parsing.get_param_descr(client.Client.list_pipelines, 'filter'))
@click.pass_context
def list(ctx: click.Context, page_token: str, max_size: int, sort_by: str,
filter: str):
"""List pipelines."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
response = client_obj.list_pipelines(
page_token=page_token,
page_size=max_size,
sort_by=sort_by,
filter=filter)
output.print_output(
response.pipelines or [],
output.ModelType.PIPELINE,
output_format,
)
@pipeline.command()
@click.argument('pipeline-id')
@click.option(
'--page-token',
default='',
help=parsing.get_param_descr(client.Client.list_pipeline_versions,
'page_token'))
@click.option(
'-m',
'--max-size',
default=100,
help=parsing.get_param_descr(client.Client.list_pipeline_versions,
'page_size'))
@click.option(
'--sort-by',
default='created_at desc',
help=parsing.get_param_descr(client.Client.list_pipeline_versions,
'sort_by'))
@click.option(
'--filter',
help=parsing.get_param_descr(client.Client.list_pipeline_versions,
'filter'))
@click.pass_context
def list_versions(ctx: click.Context, pipeline_id: str, page_token: str,
max_size: int, sort_by: str, filter: str):
"""List versions of a pipeline."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
response = client_obj.list_pipeline_versions(
pipeline_id,
page_token=page_token,
page_size=max_size,
sort_by=sort_by,
filter=filter)
output.print_output(
response.versions or [],
output.ModelType.PIPELINE,
output_format,
)
@pipeline.command()
@click.argument('version-id')
@click.pass_context
def delete_version(ctx: click.Context, version_id: str):
"""Delete a version of a pipeline."""
confirmation = f'Are you sure you want to delete pipeline version {version_id}?'
if not click.confirm(confirmation):
return
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
client_obj.delete_pipeline_version(version_id)
output.print_deleted_text('pipeline version', version_id, output_format)
@pipeline.command()
@click.argument('pipeline-id')
@click.pass_context
def get(ctx: click.Context, pipeline_id: str):
"""Get information about a pipeline."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
pipeline = client_obj.get_pipeline(pipeline_id)
output.print_output(
pipeline,
output.ModelType.PIPELINE,
output_format,
)
@pipeline.command()
@click.argument('version-id')
@click.pass_context
def get_version(ctx: click.Context, version_id: str):
"""Get information about a version of a pipeline."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
version = client_obj.get_pipeline_version(version_id=version_id)
output.print_output(
version,
output.ModelType.PIPELINE,
output_format,
)
@pipeline.command()
@click.argument('pipeline-id')
@click.pass_context
def delete(ctx: click.Context, pipeline_id: str):
"""Delete a pipeline."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
confirmation = f'Are you sure you want to delete pipeline {pipeline_id}?'
if not click.confirm(confirmation):
return
client_obj.delete_pipeline(pipeline_id)
output.print_deleted_text('pipeline', pipeline_id, output_format)
```
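Both this file and run.py below wrap their groups with `deprecated_alias_group.deprecated_alias_group_factory` so that the old `upload`/`submit` spellings keep working. The real implementation in `kfp.cli.utils` is not shown in this dump and may differ; a minimal sketch of the pattern, assuming only standard click APIs, might look like:
```python
import click


def deprecated_alias_group_factory(aliases):
    """Return a click Group class that maps deprecated command names to new ones."""

    class DeprecatedAliasGroup(click.Group):

        def get_command(self, ctx, cmd_name):
            if cmd_name in aliases:
                new_name = aliases[cmd_name]
                click.echo(
                    f'Warning: {cmd_name!r} is deprecated, use {new_name!r} instead.',
                    err=True)
                cmd_name = new_name
            return super().get_command(ctx, cmd_name)

    return DeprecatedAliasGroup
```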
#### File: kfp/cli/run.py
```python
import json
import shutil
import subprocess
import sys
import time
from typing import List
import click
from kfp import client
from kfp.cli import output
from kfp.cli.utils import deprecated_alias_group
from kfp.cli.utils import parsing
@click.group(
cls=deprecated_alias_group.deprecated_alias_group_factory(
{'submit': 'create'}))
def run():
"""Manage run resources."""
pass
@run.command()
@click.option(
'-e',
'--experiment-id',
help=parsing.get_param_descr(client.Client.list_runs, 'experiment_id'))
@click.option(
'--page-token',
default='',
help=parsing.get_param_descr(client.Client.list_runs, 'page_token'))
@click.option(
'-m',
'--max-size',
default=100,
help=parsing.get_param_descr(client.Client.list_runs, 'page_size'))
@click.option(
'--sort-by',
default='created_at desc',
help=parsing.get_param_descr(client.Client.list_runs, 'sort_by'))
@click.option(
'--filter', help=parsing.get_param_descr(client.Client.list_runs, 'filter'))
@click.pass_context
def list(ctx: click.Context, experiment_id: str, page_token: str, max_size: int,
sort_by: str, filter: str):
"""List pipeline runs."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
response = client_obj.list_runs(
experiment_id=experiment_id,
page_token=page_token,
page_size=max_size,
sort_by=sort_by,
filter=filter)
output.print_output(
response.runs or [],
output.ModelType.RUN,
output_format,
)
@run.command()
@click.option(
'-e',
'--experiment-name',
required=True,
help='Experiment name of the run.')
@click.option(
'-r',
'--run-name',
help=parsing.get_param_descr(client.Client.run_pipeline, 'job_name'))
@click.option(
'-f',
'--package-file',
type=click.Path(exists=True, dir_okay=False),
help=parsing.get_param_descr(client.Client.run_pipeline,
'pipeline_package_path'))
@click.option(
'-p',
'--pipeline-id',
help=parsing.get_param_descr(client.Client.run_pipeline, 'pipeline_id'))
@click.option('-n', '--pipeline-name', help='Name of the pipeline template.')
@click.option(
'-w',
'--watch',
is_flag=True,
default=False,
help='Watch the run status until it finishes.')
@click.option(
'-v',
'--version',
help=parsing.get_param_descr(client.Client.run_pipeline, 'version_id'))
@click.option(
'-t',
'--timeout',
default=0,
help='Wait for a run to complete until timeout in seconds.',
type=int)
@click.argument('args', nargs=-1)
@click.pass_context
def create(ctx: click.Context, experiment_name: str, run_name: str,
package_file: str, pipeline_id: str, pipeline_name: str, watch: bool,
timeout: int, version: str, args: List[str]):
"""Submit a pipeline run."""
client_obj: client.Client = ctx.obj['client']
namespace = ctx.obj['namespace']
output_format = ctx.obj['output']
if not run_name:
run_name = experiment_name
if not pipeline_id and pipeline_name:
pipeline_id = client_obj.get_pipeline_id(name=pipeline_name)
if not package_file and not pipeline_id and not version:
click.echo(
'You must provide one of [package_file, pipeline_id, version].',
err=True)
sys.exit(1)
arg_dict = dict(arg.split('=', maxsplit=1) for arg in args)
experiment = client_obj.create_experiment(experiment_name)
run = client_obj.run_pipeline(
experiment_id=experiment.id,
job_name=run_name,
pipeline_package_path=package_file,
params=arg_dict,
pipeline_id=pipeline_id,
version_id=version)
if timeout > 0:
run_detail = client_obj.wait_for_run_completion(run.id, timeout)
output.print_output(
run_detail.run,
output.ModelType.RUN,
output_format,
)
else:
display_run(client_obj, namespace, run.id, watch, output_format)
@run.command()
@click.option(
'-w',
'--watch',
is_flag=True,
default=False,
help='Watch the run status until it finishes.')
@click.option(
'-d',
'--detail',
is_flag=True,
default=False,
help='Get detailed information of the run in json format.')
@click.argument('run-id')
@click.pass_context
def get(ctx: click.Context, watch: bool, detail: bool, run_id: str):
"""Get information about a pipeline run."""
client_obj: client.Client = ctx.obj['client']
namespace = ctx.obj['namespace']
output_format = ctx.obj['output']
if detail:
output_format = 'json'
click.echo(
'The --detail/-d flag is deprecated. Please use --output=json instead.',
err=True)
display_run(client_obj, namespace, run_id, watch, output_format)
@run.command()
@click.argument('run-id')
@click.pass_context
def archive(ctx: click.Context, run_id: str):
"""Archive a pipeline run."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
client_obj.archive_run(run_id=run_id)
run = client_obj.get_run(run_id=run_id)
output.print_output(
run.run,
output.ModelType.RUN,
output_format,
)
@run.command()
@click.argument('run-id')
@click.pass_context
def unarchive(ctx: click.Context, run_id: str):
"""Unarchive a pipeline run."""
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
client_obj.unarchive_run(run_id=run_id)
run = client_obj.get_run(run_id=run_id)
output.print_output(
run.run,
output.ModelType.RUN,
output_format,
)
@run.command()
@click.argument('run-id')
@click.pass_context
def delete(ctx: click.Context, run_id: str):
"""Delete a pipeline run."""
confirmation = f'Are you sure you want to delete run {run_id}?'
if not click.confirm(confirmation):
return
client_obj: client.Client = ctx.obj['client']
output_format = ctx.obj['output']
client_obj.delete_run(run_id=run_id)
output.print_deleted_text('run', run_id, output_format)
def display_run(client: client.Client, namespace: str, run_id: str, watch: bool,
output_format: str):
run = client.get_run(run_id).run
output.print_output(
run,
output.ModelType.RUN,
output_format,
)
if not watch:
return
argo_path = shutil.which('argo')
if not argo_path:
raise RuntimeError(
"argo isn't found in $PATH. It's necessary for watch. "
"Please make sure it's installed and available. "
'Installation instructions be found here - '
'https://github.com/argoproj/argo-workflows/releases')
argo_workflow_name = None
while True:
time.sleep(1)
run_detail = client.get_run(run_id)
run = run_detail.run
if run_detail.pipeline_runtime and run_detail.pipeline_runtime.workflow_manifest:
manifest = json.loads(run_detail.pipeline_runtime.workflow_manifest)
if manifest['metadata'] and manifest['metadata']['name']:
argo_workflow_name = manifest['metadata']['name']
break
if run_detail.run.status in ['Succeeded', 'Skipped', 'Failed', 'Error']:
click.echo(f'Run is finished with status {run_detail.run.status}.')
return
if argo_workflow_name:
subprocess.run(
[argo_path, 'watch', argo_workflow_name, '-n', namespace])
output.print_output(
run,
output.ModelType.RUN,
output_format,
)
```
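One detail of `create` above worth calling out: run parameters arrive as positional NAME=VALUE arguments and are folded into the params dict with `maxsplit=1`, so only the first `=` splits and values may themselves contain `=`. For example (illustration only):
```python
args = ('learning_rate=0.01', 'tag=exp=v2')
arg_dict = dict(arg.split('=', maxsplit=1) for arg in args)
assert arg_dict == {'learning_rate': '0.01', 'tag': 'exp=v2'}
```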
#### File: kfp/components/yaml_component_test.py
```python
import os
import tempfile
import textwrap
import unittest
from unittest import mock
import requests
from kfp.components import structures
from kfp.components import yaml_component
SAMPLE_YAML = textwrap.dedent("""\
components:
comp-component-1:
executorLabel: exec-component-1
inputDefinitions:
parameters:
input1:
parameterType: STRING
outputDefinitions:
parameters:
output1:
parameterType: STRING
deploymentSpec:
executors:
exec-component-1:
container:
command:
- sh
- -c
- 'set -ex
echo "$0" > "$1"'
- '{{$.inputs.parameters[''input1'']}}'
- '{{$.outputs.parameters[''output1''].output_file}}'
image: alpine
pipelineInfo:
name: component-1
root:
dag:
tasks:
component-1:
cachingOptions:
enableCache: true
componentRef:
name: comp-component-1
inputs:
parameters:
input1:
componentInputParameter: input1
taskInfo:
name: component-1
inputDefinitions:
parameters:
input1:
parameterType: STRING
schemaVersion: 2.1.0
sdkVersion: kfp-2.0.0-alpha.3
""")
class YamlComponentTest(unittest.TestCase):
def test_load_component_from_text(self):
component = yaml_component.load_component_from_text(SAMPLE_YAML)
self.assertEqual(component.component_spec.name, 'component-1')
self.assertEqual(component.component_spec.outputs,
{'output1': structures.OutputSpec(type='String')})
self.assertEqual(component._component_inputs, {'input1'})
self.assertEqual(component.name, 'component-1')
self.assertEqual(
component.component_spec.implementation.container.image, 'alpine')
def test_load_component_from_file(self):
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, 'sample_yaml.yaml')
with open(path, 'w') as f:
f.write(SAMPLE_YAML)
component = yaml_component.load_component_from_file(path)
self.assertEqual(component.component_spec.name, 'component-1')
self.assertEqual(component.component_spec.outputs,
{'output1': structures.OutputSpec(type='String')})
self.assertEqual(component._component_inputs, {'input1'})
self.assertEqual(component.name, 'component-1')
self.assertEqual(
component.component_spec.implementation.container.image, 'alpine')
def test_load_component_from_url(self):
component_url = 'https://raw.githubusercontent.com/some/repo/components/component_group/component.yaml'
def mock_response_factory(url, params=None, **kwargs):
if url == component_url:
response = requests.Response()
response.url = component_url
response.status_code = 200
response._content = SAMPLE_YAML
return response
raise RuntimeError('Unexpected URL "{}"'.format(url))
with mock.patch('requests.get', mock_response_factory):
component = yaml_component.load_component_from_url(component_url)
self.assertEqual(component.component_spec.name, 'component-1')
self.assertEqual(component.component_spec.outputs,
{'output1': structures.OutputSpec(type='String')})
self.assertEqual(component._component_inputs, {'input1'})
self.assertEqual(component.name, 'component-1')
self.assertEqual(
component.component_spec.implementation.container.image,
'alpine')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JohnMagnusson/messenger-analyser",
"score": 3
} |
#### File: JohnMagnusson/messenger-analyser/json_parser.py
```python
import numpy as np
from model.content_types import ContentType
from model.group_chat import GroupChat
from model.message import Message
from model.reaction import Reaction
class JsonParser:
"""
Parses the facebook data files to a GroupChat object
"""
def json_to_group_chat(self, data_files):
users = set()
messages = []
for data in data_files:
users.update(self.__get_participants_from_json(data["participants"]))
messages.extend(self.__get_messages_from_json(data["messages"]))
return GroupChat(users, np.array(messages))
def __get_participants_from_json(self, json_participants):
return [name["name"].encode("latin_1").decode("utf_8") for name in json_participants]
def __get_messages_from_json(self, json_messages):
messages = []
for dict_message in json_messages:
timestamp = dict_message["timestamp_ms"]
sender = dict_message["sender_name"].encode("latin_1").decode("utf_8") # Ensure name is correct
reactions = self.__parse_reactions_from_json(dict_message)
content_type = self.__parse_type_from_json(dict_message["type"], dict_message)
content = self.__parse_content_from_json(content_type, dict_message)
new_message = Message(timestamp, sender, reactions, content_type, content)
messages.append(new_message)
return messages
def __parse_reactions_from_json(self, dict_message):
if "reactions" in dict_message:
reactions = []
for reaction in dict_message["reactions"]:
reactions.append(Reaction(reaction["actor"], reaction["reaction"]))
return reactions
return None # The case where no one reacted to the message
def __parse_type_from_json(self, json_type, dict_message):
"""
Converts facebook type to internal
:param json_type: The type specified in the file
:param dict_message: The whole dict used to find internal differences
:return: An enum specifying the content type
"""
if json_type == "Generic": # Generic has multiple payloads we need to check
if "sticker" in dict_message:
return ContentType.STICKER
elif "gifs" in dict_message:
return ContentType.GIF
elif "videos" in dict_message:
return ContentType.VIDEO
elif "photos" in dict_message:
return ContentType.IMAGE
elif "content" in dict_message:
return ContentType.TEXT
elif "audio_files" in dict_message:
return ContentType.AUDIO
            elif len(dict_message) == 3:  # Empty message; verified by checking the number of fields
return ContentType.EMPTY
            else:  # TODO: continue handling all the different possible data
raise ValueError("The generic type had unknown payload, " + str(dict_message))
elif json_type == "Share":
return ContentType.SHARE
elif json_type == "Call":
return ContentType.CALL
elif json_type == "Subscribe":
return ContentType.SUBSCRIBE
else:
raise ValueError("Unsupported json type, " + str(json_type))
def __parse_content_from_json(self, content_type, dict_message):
"""
Parses the JSON to get information from the message depending on its type.
:param content_type: The type of content
:param dict_message: The json
:return: The content of the message
"""
if content_type == ContentType.TEXT:
return dict_message["content"].encode("latin_1").decode("utf_8").lower() # Fixing encoding of the data
elif content_type == ContentType.SHARE:
if "share" in dict_message:
if "link" in dict_message["share"]: # For sharing links
return dict_message["share"]["link"]
elif "share_text" in dict_message["share"]: # For sharing location
return dict_message["share"]["share_text"]
else:
raise ValueError("The message had an unknown share content " + str(dict_message["share"]))
else:
if "content" in dict_message:
return dict_message["content"]
else:
raise ValueError("The message was share classified but no content " + str(dict_message["share"]))
elif content_type == ContentType.GIF:
return self.__get_nested_uri(dict_message["gifs"])
elif content_type == ContentType.IMAGE:
return self.__get_nested_uri(dict_message["photos"])
elif content_type == ContentType.VIDEO:
return self.__get_nested_uri(dict_message["videos"]) # Only takes the uri, ignore ts of video and thumbnail
elif content_type == ContentType.STICKER:
return dict_message["sticker"]["uri"]
elif content_type == ContentType.EMPTY:
return "Empty"
elif content_type == ContentType.CALL:
return dict_message["call_duration"] # Returns how long the call was
elif content_type == ContentType.SUBSCRIBE:
new_users = []
for user in dict_message["users"]:
new_users.append(user["name"])
return new_users
elif content_type == ContentType.AUDIO:
return self.__get_nested_uri(dict_message["audio_files"])
else:
raise ValueError("content_type is not known" + str(content_type))
def __get_nested_uri(self, data):
entities = []
for entity in data:
            entities.append(entity["uri"])  # Only take the uri; ignore any other attributes
return entities
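
# Note (added commentary, not part of the original module): the repeated
# .encode("latin_1").decode("utf_8") calls above undo the mojibake in Facebook's
# JSON export, which stores UTF-8 bytes that json.load reads back as latin-1.
# For example:
#     "JÃ¶rgen".encode("latin_1").decode("utf_8") == "Jörgen"   # True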
``` |
{
"source": "JohnMaguire/pplugins",
"score": 2
} |
#### File: JohnMaguire/pplugins/test_pplugins.py
```python
import multiprocessing
import threading
from six.moves import queue
from mock import patch
import pytest
import pplugins
DEFAULT = "default"
def test_pluginerror():
with pytest.raises(pplugins.PluginError) as exc_info:
raise pplugins.PluginError("Generic error", "foo")
assert exc_info.value.plugin == "foo"
assert "foo" in str(exc_info.value)
def test_plugin_abstract():
with pytest.raises(TypeError):
pplugins.Plugin()
@patch.multiple(pplugins.Plugin, __abstractmethods__=set())
@patch.object(pplugins.Plugin, 'run')
def test_plugin_constructor(run_mock):
interface = pplugins.PluginInterface(None, None)
plugin = pplugins.Plugin(interface)
# Assert that the interface is set on the plugin object
assert plugin.interface == interface
run_mock.assert_called_once_with()
def test_pluginrunner_abstract():
with pytest.raises(TypeError):
pplugins.PluginRunner()
@patch.multiple(pplugins.PluginRunner, __abstractmethods__=set())
def test_pluginrunner_constructor():
pr = pplugins.PluginRunner(None, None, None)
# multiprocessing daemon flag
assert pr.daemon is True
@patch.multiple(pplugins.Plugin, __abstractmethods__=set())
@patch.multiple(pplugins.PluginRunner, __abstractmethods__=set())
def test_pluginrunner_run():
pr = pplugins.PluginRunner(None, None, None)
# assert plugin is called from pluginrunner
class PluginStub(pplugins.Plugin):
pass
module = type('Module', (), {'PluginStub': PluginStub})
with patch.object(pplugins.Plugin, '__init__', return_value=None), \
patch.object(pplugins.PluginRunner, '_load_plugin',
return_value=module) as constructor_mock:
pr.run()
constructor_mock.assert_any_call()
# ensure exceptions are caught
class ErrorPluginStub(pplugins.Plugin):
def __init__(self, _):
raise Exception
module = type('Module', (), {'ErrorPluginStub': ErrorPluginStub})
with patch.object(pplugins.PluginRunner, '_load_plugin',
return_value=module):
pr.run()
# ensure exception is raised if a plugin can't be found
module = type('Module', (), {})
with pytest.raises(pplugins.PluginError) as excinfo, \
patch.object(pplugins.PluginRunner, '_load_plugin',
return_value=module) as load_plugin_mock:
pr.run()
assert 'find' in str(excinfo.value)
assert str(pr.plugin_class) in str(excinfo.value)
    # assert the overridden _load_plugin() got called
load_plugin_mock.assert_called_once_with()
def test_pluginmanager_abstract():
with pytest.raises(TypeError):
pplugins.PluginManager()
@patch.multiple(pplugins.PluginManager, __abstractmethods__=set())
def test_pluginmanager_constructor():
threads = threading.active_count()
pplugins.PluginManager()
# plugin manager should not have started the reaping thread when called
# through the constructor
assert threading.active_count() == threads
@patch.multiple(pplugins.PluginManager, __abstractmethods__=set())
def test_pluginmanager_contextmanager():
threads = threading.active_count()
with pplugins.PluginManager():
# assert that reaping thread was started
assert threading.active_count() == threads + 1
# assert that reaping thread was stopped
assert threading.active_count() == threads
@patch.multiple(pplugins.PluginRunner, __abstractmethods__=set())
@patch.multiple(pplugins.PluginManager, __abstractmethods__=set())
@patch.object(pplugins.PluginManager, 'reap_plugins', return_value=None)
@patch.object(multiprocessing.Process, 'start', return_value=None)
def test_pluginmanager_start_plugin(_, __):
pm = pplugins.PluginManager()
# test starting a plugin
class PluginRunnerStub(pplugins.PluginRunner):
def run(self):
pass
pm.plugin_runner = PluginRunnerStub
pm.start_plugin('foo')
assert 'foo' in pm.plugins
# test plugin already running
with pytest.raises(pplugins.PluginError) as excinfo:
pm.start_plugin('foo')
assert 'already running' in str(excinfo.value)
# test error starting a plugin
class PluginRunnerErrorStub(pplugins.PluginRunner):
def __init__(self, _, __, ___):
raise Exception
pm.plugin_runner = PluginRunnerErrorStub
with pytest.raises(Exception), patch.multiple(pm, plugins={}):
pm.start_plugin('foo')
@patch.multiple(pplugins.PluginManager, __abstractmethods__=set())
@patch.object(pplugins.PluginManager, 'reap_plugins', return_value=None)
@patch.object(pplugins.PluginManager, '_stop_plugin', return_value=None)
def test_pluginmanager_stop_plugin(stop_plugin_mock, _):
pm = pplugins.PluginManager()
plugins = dict(test={'process': multiprocessing.Process()},
**pm.plugins)
# non-existent plugin
pm.stop_plugin('test')
# cleanly
pm.plugins = plugins
with patch.object(multiprocessing.Process, 'is_alive', return_value=False):
pm.stop_plugin('test')
stop_plugin_mock.assert_called_once_with('test')
assert pm.plugins == {}
# forcefully
plugins = dict(test={'process': multiprocessing.Process()},
**pm.plugins)
pm.plugins = plugins
with patch.object(multiprocessing.Process, 'is_alive',
return_value=True), \
patch.object(multiprocessing.Process, 'terminate',
return_value=None) as terminate_mock:
pm.stop_plugin('test')
terminate_mock.assert_called_once_with()
assert pm.plugins == {}
@patch.object(pplugins.PluginManager, 'reap_plugins', return_value=None)
@patch.multiple(pplugins.PluginManager, __abstractmethods__=set())
def test_pluginmanager_process_messages(_):
pm = pplugins.PluginManager()
q = queue.Queue()
pm.plugins = {'test': {'messages': q}}
# empty queue
with patch.object(pplugins.PluginManager, '_process_message',
return_value=None) as process_message_mock:
pm.process_messages()
process_message_mock.assert_not_called()
# _process_message must be implemented
q.put('test message')
with pytest.raises(NotImplementedError):
pm.process_messages()
# with a message
q.put('test message')
with patch.object(pplugins.PluginManager, '_process_message',
return_value=None) as process_message_mock:
pm.process_messages()
process_message_mock.assert_called_once_with('test', 'test message')
@patch.multiple(pplugins.PluginManager, __abstractmethods__=set())
def test_pluginmanager_reap_plugins():
pm = pplugins.PluginManager()
plugins = dict(test={'process': multiprocessing.Process()},
**pm.plugins)
# reap dead processes
pm.plugins = plugins
with patch.object(multiprocessing.Process, 'is_alive', return_value=False):
pm.reap_plugins()
assert pm.plugins == {}
# don't reap living processes
pm.plugins = plugins
with patch.object(multiprocessing.Process, 'is_alive', return_value=True):
pm.reap_plugins()
assert pm.plugins == plugins
``` |
{
"source": "johnmahlon/garecovery",
"score": 2
} |
#### File: garecovery/tests/test_recovery_scan.py
```python
import mock
import wallycore as wally
import garecovery.two_of_three
from garecovery.clargs import DEFAULT_SUBACCOUNT_SEARCH_DEPTH
from gaservices.utils import txutil
from .util import AuthServiceProxy, datafile, get_output, parse_summary, raise_IOError
garecovery.bitcoin_config.open = raise_IOError
sub_depth = DEFAULT_SUBACCOUNT_SEARCH_DEPTH
key_depth = 20
destination_address = 'mynHfTyTWyGGB76NBFbfUrTnn8YWQkTJVs'
@mock.patch('garecovery.two_of_three.bitcoincore.AuthServiceProxy')
def test_recover_2of3(mock_bitcoincore):
"""Test 2of3 happy path"""
mock_bitcoincore.return_value = AuthServiceProxy('testnet_txs')
estimate = {'blocks': 3, 'feerate': 1, }
mock_bitcoincore.return_value.estimatesmartfee.return_value = estimate
args = [
'--mnemonic-file={}'.format(datafile('mnemonic_6.txt')),
'--rpcuser=abc',
'--rpcpassword=<PASSWORD>',
'2of3',
'--network=testnet',
'--recovery-mnemonic-file={}'.format(datafile('mnemonic_7.txt')),
'--key-search-depth={}'.format(key_depth),
'--search-subaccounts={}'.format(sub_depth),
'--destination-address={}'.format(destination_address),
]
# Raw tx
output = get_output(args).strip()
assert output == open(datafile("signed_2of3_5")).read().strip()
# Check replace by fee is set
tx = txutil.from_hex(output)
assert wally.tx_get_num_inputs(tx) == 1
assert wally.tx_get_input_sequence(tx, 0) == int(32*'1', 2) - 2
# Summary
args = ['--show-summary', ] + args
output = get_output(args)
summary = parse_summary(output)
assert len(summary) == 1
assert summary[0]['destination address'] == destination_address
@mock.patch('garecovery.two_of_three.bitcoincore.AuthServiceProxy')
def test_set_nlocktime(mock_bitcoincore):
"""Test that newly created recovery transactions have nlocktime = current blockheight + 1"""
mock_bitcoincore.return_value = AuthServiceProxy('testnet_txs')
estimate = {'blocks': 3, 'feerate': 1, }
mock_bitcoincore.return_value.estimatesmartfee.return_value = estimate
current_blockheight = 123
mock_bitcoincore.return_value.getblockcount.return_value = current_blockheight
args = [
'--mnemonic-file={}'.format(datafile('mnemonic_6.txt')),
'--rpcuser=abc',
'--rpcpassword=<PASSWORD>',
'2of3',
'--network=testnet',
'--recovery-mnemonic-file={}'.format(datafile('mnemonic_7.txt')),
'--key-search-depth={}'.format(key_depth),
'--search-subaccounts={}'.format(sub_depth),
'--destination-address={}'.format(destination_address),
]
output = get_output(args).strip()
tx = txutil.from_hex(output)
assert wally.tx_get_locktime(tx) == current_blockheight
``` |
{
"source": "JohnMai1994/CS116-2018_Winter_Term",
"score": 2
} |
#### File: a01-j4mai/a01-j4mai/a01q3.py
```python
import math
import check
## QUESTION 3
## basic_grade(assts, mid_exam, final_exam, clickers, tutorials) returns the basic
## grade under the grading scheme: 20% assignments, 30% midterm, 45% final exam,
## and up to 5% participation from clickers and tutorial attendance
## basic_grade: Float Float Float Float Nat -> Nat
## Require:
## assts, mid_exam, final_exam and clickers are floating point value between
## 0 and 100 (inclusive)
## tutorials is natural number between 1 and 12 (inclusive)
## Examples
## basic_grade(60.0, 75.8, 90.0, 55.5, 9) => 79
## baisc_grade(0.0, 0.0, 0.0, 0.0, 10) => 1
def basic_grade(assts, mid_exam, final_exam, clickers, tutorials):
ea_part = (0.2 * assts) + (0.3 * mid_exam) +(0.45 * final_exam)
participation = min(5, 0.05 * clickers + 0.1 * tutorials)
return round (ea_part + participation)
## Test1: Regular Situation
check.expect("q3Test1", basic_grade(60.0, 75.8, 90.0, 55.5, 9), 79)
## Test2: No assignment, midterm and final grade
check.expect("q3Test2", basic_grade(0.0, 0.0, 0.0, 0.0, 10), 1)
## Test3: Participation Grade is not maximum
check.expect("q3Test3", basic_grade(80, 90, 78, 50, 2), 81)
## Test4: Full mark
check.expect("q3Test4", basic_grade(100, 100, 100, 100, 12), 100)
```
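As a quick sanity check of the first docstring example above (illustration only, not part of the submission), the arithmetic works out to 79:
```python
weighted = 0.2 * 60.0 + 0.3 * 75.8 + 0.45 * 90.0   # 12.0 + 22.74 + 40.5 = 75.24
participation = min(5, 0.05 * 55.5 + 0.1 * 9)      # min(5, 2.775 + 0.9) = 3.675
assert round(weighted + participation) == 79        # 78.915 rounds to 79
```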
#### File: a03-j4mai/a03-j4mai/a03q4.py
```python
import check
#Q4
# space_position(s, n) consumes a string, s, and a natural number, n, and returns
# the index of the nth space in s.
# space_position: Str Nat -> Nat
# Requires:
# s is a non-empty string
# n is a natural number greater than 0
# Examples:
# space_position("ab ab ar cde fa e", 3) => 8
def space_position(s, n):
g = 0
if n == 0:
return -1
if s[0:1].isspace():
g = 1 + space_position(s[1:], n-1)
else:
if s.count(" ") >= n:
g = 1 + space_position(s[1:], n)
else:
g = len(s) +1
return g
# make_poem(s,n) consumes a string, s, and a natural number, n, and prints the
# words of s across multiple lines, with n words per line.
# make_poem: Str Nat -> None
# Requires:
# s is a non-empty string
# n is a natural number greater than 0
# Examples:
# make_poem("How many Lowes would Rob Lowe rob if Rob Lowe could rob Lowes", 3)
# will print:
# How many Lowes
# would Rob Lowe
# rob if Rob
# Lowe could rob
# Lowes
def make_poem(s,n):
k = space_position(s, n)
if k >= len(s):
print(s)
else:
print(s[0:k])
make_poem(s[k+1:], n)
# Test Example, n = 3
check.set_screen("How many Lowes\nwould Rob Lowe\nrob if Rob\nLowe could rob\nLowes")
check.expect("Q4T1", make_poem("How many Lowes would Rob Lowe rob if Rob Lowe could rob Lowes", 3),
None)
# Test when n = 1
check.set_screen("abc\ndef\nhijk\nlmno\npQ")
check.expect("Q4T2", make_poem("abc def hijk lmno pQ",1), None)
# Test When n = 4
check.set_screen("my job is very\nawaresome and my dad\nis superman")
check.expect("Q4T3", make_poem("my job is very awaresome and my dad is superman", 4), None)
```
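A quick spot-check of the `space_position` example above (illustration only):
```python
s = "ab ab ar cde fa e"
spaces = [i for i, ch in enumerate(s) if ch == " "]
assert spaces == [2, 5, 8, 12, 15]
assert spaces[3 - 1] == 8   # the 3rd space sits at index 8, as documented
```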
#### File: a04-j4mai/a04-j4mai/a04q3.py
```python
import math
import check
# A MineGrid is a (listof (listof Bool))
# Requires: All lists are non-empty
# Each (listof Bool) has the same length
# note: True means mine, False means safe
# A MineBoard is a (listof (listof Str))
# Requires: Each string is either a mine ('*') hidden(' ')
# or safe (a digit between '0' and '8')
# All lists are non-empty
# Each (listof Str) has the same length
# Example board from the assignment file
grid3x3 = [[True ,False,False],
[False,False,False],
[False,False,True]]
board3x3 = [[' ', '1', '0'],
[' ', '2', '1'],
[' ', ' ', '*']]
grid4x4 = [[True,False,False,False,],
[False,False,False,False],
[False,False,True,True],
[True,False,True,False]]
board4x4 = [['','','',''],
['','','',''],
['','','',''],
['','','','']]
## Question 3
# count_mines_no(grid,row,col) returns the number of mines in the 3x3 neighbourhood
# of the tile at position row and col in the grid (diagonals and the tile itself included)
# count_mines_no: MineGrid Nat Nat -> Nat
# requires: grid and board have the same dimensions and are consistent
# 0 <= row < height of board
# 0 <= col < width of board
# examples:
# count_mines_no(grid4x4,0,0) => 1
# count_mines_no(grid4x4,3,2) => 3
def count_mines_no(grid,row,col):
row_line = len(grid)-1
col_line = len(grid[1])
if row-1 < 0:
return len(list(filter(lambda s: s== True,grid[row][max(0,col-1):min(col_line,col+2)])))\
+ len(list(filter(lambda s: s== True,grid[min(row_line,row+1)][max(0,col-1):min(col_line,col+2)])))
elif row + 1 > row_line:
return len(list(filter(lambda s: s== True,grid[max(0,row-1)][max(0,col-1):min(col_line,col+2)])))\
+ len(list(filter(lambda s: s== True,grid[row][max(0,col-1):min(col_line,col+2)])))
else:
return len(list(filter(lambda s: s== True,grid[max(0,row-1)][max(0,col-1):min(col_line,col+2)])))\
+ len(list(filter(lambda s: s== True,grid[row][max(0,col-1):min(col_line,col+2)])))\
+ len(list(filter(lambda s: s== True,grid[min(row_line,row+1)][max(0,col-1):min(col_line,col+2)])))
# count_mines(grid,row,col) returns the number of mine tiles adjacent to the tile at
# position row and col in the grid (diagonals included); returns 0 if that tile is a mine
# count_mines: MineGrid Nat Nat -> Nat
# requires: grid and board have the same dimensions and are consistent
# 0 <= row < height of board
# 0 <= col < width of board
# examples:
# count_mines(grid4x4,0,1) => 1
# count_mines(grid4x4,3,3) => 3
# count_mines(grid4x4,3,0) => 0
def count_mines(grid,row,col):
if grid[row][col]:
return 0
else:
return count_mines_no(grid,row,col)
# Test1 for helper function: No Mine surrounded
check.expect('Q3T1H', count_mines(grid3x3,2,0), 0)
check.expect('Q3T2H', count_mines(grid4x4,0,3), 0)
# Test2 for helper funciton: one Mine surrounded
check.expect('Q3T3H', count_mines(grid3x3,1,0), 1)
check.expect('Q3T4H', count_mines(grid4x4,1,0), 1)
check.expect('Q3T5H', count_mines(grid4x4,2,0), 1)
# Test3 for helper function: two or more Mine surrounded
check.expect('Q3T6H', count_mines(grid3x3,1,1), 2)
check.expect('Q3T7H', count_mines(grid4x4,3,1), 3)
check.expect('Q3T8H', count_mines(grid4x4,3,3), 3)
# Test4 for helper function: on Mine
check.expect('Q3T9H', count_mines(grid4x4,0,0), 0)
check.expect('Q3T10H', count_mines(grid4x4, 2,2),0)
# reveal(grid,board, row, col) reveals the tile at the given row and col(umn)
# in board, using the mine positions from grid
# reveal: MineGrid MineBoard Nat Nat -> None
# requires: grid and board have the same dimensions and are consistent
# 0 <= row < height of board
# 0 <= col < width of board
# effects: board is mutated
# Examples:
# reveal(grid3x3, board3x3, 0,0) => None, and changes contents of board3x3
# to [['*', '1', '0'], [' ', '2', '1'], [' ', ' ', '*']]
# reveal(grid3x3, board3x3, 1,0) => None, and changes contents of board3x3
# to [[' ', '1', '0'], ['1', '2', '1'], [' ', ' ', '*']]
def reveal(grid,board,row,col):
if grid[row][col]:
board[row][col] = '*'
else:
board[row][col] = str(count_mines(grid,row,col))
# Tests:
grid3x3 = [[True ,False,False],
[False,False,False],
[False,False,True]]
board3x3 = [[' ', '1', '0'],
[' ', '2', '1'],
[' ', ' ', '*']]
# Test1: the position is mine
check.expect('Q3T1', reveal(grid3x3,board3x3,0,0), None)
check.expect('Q3T1(M)', board3x3, [['*', '1', '0'], [' ', '2', '1'],[' ', ' ', '*']])
# Test2: the position is surrounded by 1 mine
check.expect('Q3T2', reveal(grid3x3,board3x3,1,0), None)
check.expect('Q3T2(1)', board3x3, [['*', '1', '0'], ['1', '2', '1'],[' ', ' ', '*']])
check.expect('Q3T3', reveal(grid3x3,board3x3,2,1), None)
check.expect('Q3T3(1)', board3x3, [['*', '1', '0'], ['1', '2', '1'],[' ', '1', '*']])
# Test3: the position is not surrounded by mine
check.expect('Q3T4', reveal(grid3x3,board3x3,2,0), None)
check.expect('Q3T4(None)', board3x3, [['*', '1', '0'], ['1', '2', '1'],['0', '1', '*']])
```
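For comparison only (this is not the submitted solution), a neighbour count that clamps its row and column ranges stays in bounds on narrow grids, which is where the grading report below records IndexError failures:
```python
def count_adjacent_mines(grid, row, col):
    # Count mines in the 3x3 neighbourhood of (row, col), excluding the tile
    # itself; clamping the ranges keeps single-row/column grids in bounds.
    height, width = len(grid), len(grid[0])
    total = 0
    for r in range(max(0, row - 1), min(height, row + 2)):
        for c in range(max(0, col - 1), min(width, col + 2)):
            if (r, c) != (row, col) and grid[r][c]:
                total += 1
    return total

# matches the count_mines examples above
grid = [[True, False, False, False],
        [False, False, False, False],
        [False, False, True, True],
        [True, False, True, False]]
assert count_adjacent_mines(grid, 0, 1) == 1
assert count_adjacent_mines(grid, 3, 3) == 3
# and stays in bounds on a 1-row grid
assert count_adjacent_mines([[False, True, False]], 0, 0) == 1
```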
#### File: a04-j4mai/a04-j4mai/GRADED_ASSIGNMENT.py
```python
ASSIGNMENT 04
Student's Quest ID: j4mai
**** Testing Results **********************************************************
65/69 Total Mark
** Question 1: 14/14
** Question 2: 16/16
** Question 3: 28/32
** Question 4: 7/7
(Question 1, Test t01, 1 marks): ^apowers:base^a: Passed; passed.
(Question 1, Test t02, 1 marks): ^apowers:general^a: Passed; passed.
(Question 1, Test t03, 1 marks): ^apowers:edge^a: Passed; passed.
(Question 1, Test t04, 1 marks): ^apowers:zero^a: Passed; passed.
(Question 1, Test t05, 1 marks): ^awins:base^a: Passed; passed.
(Question 1, Test t06, 1 marks): ^awins:general^a: Passed; passed.
(Question 1, Test t07, 1 marks): ^awins:no wins^a: Passed; passed.
(Question 1, Test t08, 1 marks): ^awins:all wins^a: Passed; passed.
(Question 1, Test t09, 1 marks): ^awins:tie^a: Passed; passed.
(Question 1, Test t10, 1 marks): ^awins:one game^a: Passed; passed.
(Question 1, Test t11, 1 marks): ^aadd_lists:base^a: Passed; passed.
(Question 1, Test t12, 1 marks): ^aadd_lists:general^a: Passed; passed.
(Question 1, Test t13, 1 marks): ^aadd_lists:edge^a: Passed; passed.
(Question 1, Test t14, 1 marks): ^aadd_lists:no change^a: Passed; passed.
(Question 2, Test t01, 1 marks): ^ashouting:base^a: Passed; passed.
(Question 2, Test t02, 1 marks): ^ashouting:edge^a: Passed; passed.
(Question 2, Test t03, 1 marks): ^ashouting:ties^a: Passed; passed.
(Question 2, Test t04, 1 marks): ^ashouting:general^a: Passed; passed.
(Question 2, Test t05, 1 marks): ^ashouting:ignore^a: Passed; passed.
(Question 2, Test t06, 1 marks): ^ashouting:all same case^a: Passed; passed.
(Question 2, Test t07, 1 marks): ^areplace:base^a: Passed; passed.
(Question 2, Test t08, 1 marks): ^areplace:general^a: Passed; passed.
(Question 2, Test t09, 1 marks): ^areplace:edge^a: Passed; passed.
(Question 2, Test t10, 1 marks): ^areplace:all^a: Passed; passed.
(Question 2, Test t11, 1 marks): ^areplace:none^a: Passed; passed.
(Question 2, Test t12, 1 marks): ^aquot:base^a: Passed; passed.
(Question 2, Test t13, 1 marks): ^aquot:general^a: Passed; passed.
(Question 2, Test t14, 1 marks): ^aquot:one^a: Passed; passed.
(Question 2, Test t15, 1 marks): ^aquot:none^a: Passed; passed.
(Question 2, Test t16, 1 marks): ^aquot:all^a: Passed; passed.
(Question 3, Test t01, 1 marks): ^acount:base^a: FAILED; FAILED: exception occurred: IndexError: list index out of range
(Question 3, Test t02, 1 marks): ^acount:3x3:zero^a: Passed; passed.
(Question 3, Test t03, 1 marks): ^acount:3x3:tl^a: Passed; passed.
(Question 3, Test t04, 1 marks): ^acount:3x3:t^a: Passed; passed.
(Question 3, Test t05, 1 marks): ^acount:3x3:tr^a: Passed; passed.
(Question 3, Test t06, 1 marks): ^acount:3x3:l^a: Passed; passed.
(Question 3, Test t07, 1 marks): ^acount:3x3:r^a: Passed; passed.
(Question 3, Test t08, 1 marks): ^acount:3x3:bl^a: Passed; passed.
(Question 3, Test t09, 1 marks): ^acount:3x3:b^a: Passed; passed.
(Question 3, Test t10, 1 marks): ^acount:3x3:br^a: Passed; passed.
(Question 3, Test t11, 1 marks): ^acount:3x3:corners:tl^a: Passed; passed.
(Question 3, Test t12, 1 marks): ^acount:3x3:corners:bl^a: Passed; passed.
(Question 3, Test t13, 1 marks): ^acount:3x3:corners:tr^a: Passed; passed.
(Question 3, Test t14, 1 marks): ^acount:3x3:corners:br^a: Passed; passed.
(Question 3, Test t15, 1 marks): ^acount:3x3:edges:t^a: Passed; passed.
(Question 3, Test t16, 1 marks): ^acount:3x3:edges:l^a: Passed; passed.
(Question 3, Test t17, 1 marks): ^acount:3x3:edges:r^a: Passed; passed.
(Question 3, Test t18, 1 marks): ^acount:3x3:edges:b^a: Passed; passed.
(Question 3, Test t19, 1 marks): ^acount:3x3:ignore_middle^a: Passed; passed.
(Question 3, Test t20, 1 marks): ^acount:4x4:all_numbers:8^a: Passed; passed.
(Question 3, Test t21, 1 marks): ^acount:4x4:all_numbers:7^a: Passed; passed.
(Question 3, Test t22, 1 marks): ^acount:4x4:all_numbers:6^a: Passed; passed.
(Question 3, Test t23, 1 marks): ^acount:4x4:all_numbers:5^a: Passed; passed.
(Question 3, Test t24, 1 marks): ^acount:4x4:all_numbers:4^a: Passed; passed.
(Question 3, Test t25, 1 marks): ^acount:4x4:all_numbers:3^a: Passed; passed.
(Question 3, Test t26, 1 marks): ^acount:4x4:all_numbers:2^a: Passed; passed.
(Question 3, Test t27, 1 marks): ^acount:narrow:two_edge:hori^a: FAILED; FAILED: exception occurred: IndexError: list index out of range
(Question 3, Test t28, 1 marks): ^acount:narrow:two_edge:vert^a: Passed; passed.
(Question 3, Test t29, 1 marks): ^acount:narrow:corners:hori:left^a: FAILED; FAILED: exception occurred: IndexError: list index out of range
(Question 3, Test t30, 1 marks): ^acount:narrow:corners:hori:right^a: FAILED; FAILED: exception occurred: IndexError: list index out of range
(Question 3, Test t31, 1 marks): ^acount:narrow:corners:vert:left^a: Passed; passed.
(Question 3, Test t32, 1 marks): ^acount:narrow:corners:vert:right^a: Passed; passed.
(Question 4, Test t01, 1 marks): ^agame_won:1x1^a: Passed; passed.
(Question 4, Test t02, 1 marks): ^agame_won:3x3^a: Passed; passed.
(Question 4, Test t03, 1 marks): ^agame_won:3x3alt^a: Passed; passed.
(Question 4, Test t04, 1 marks): ^agame_won:2x2empty^a: Passed; passed.
(Question 4, Test t05, 1 marks): ^agame_won:2x2full^a: Passed; passed.
(Question 4, Test t06, 1 marks): ^agame_won:narrow:horiz^a: Passed; passed.
(Question 4, Test t07, 1 marks): ^agame_won:narrow:vert^a: Passed; passed.
138 control characters removed.
**** testing_result.txt *****************************************************************
Total number of tests missing: 17
powers: 1 of the 3 required cases were missing:
- Part A n or k is 1
count_wins: 1 of the 5 required cases were missing:
- Part B length 1 lists
add_lists: 2 of the 3 required cases were missing:
- Part C empty lists
- Part C length 1 lists
shouting: 1 of the 6 required cases were missing:
- Part A empty string
replace: 2 of the 5 required cases were missing:
- Part B empty string
- Part B length 1 list
keep_quotients: 2 of the 4 required cases were missing:
- Part C empty lists
- Part C dividing by 1
count_mines: 6 of the 12 required cases were missing:
  - return value is 4
  - return value is 5
  - return value is 6
  - return value is 7
  - return value is 8
- width and/or height of grid is 1
game_won: 2 of the 5 required cases were missing:
- Grid is all mines or all safe
- width and/or height of grid is 1
**** a04q1.py *****************************************************************
##===============================================
## <NAME> (20557203)
## CS 116 Winter 2018
## Assignment 04, Question 1
##===============================================
import math
import check
## Question 1
# Question1(a)
# powers_plus(n,k,a) returns a list containning the first k powers of n,
# starting with the ath power.
# powers_plus: Nat Nat Nat -> (Listof Nat)
# Examples:
# powers_plus(2,3,0) => [1,2,4]
def powers_plus(n,k,a):
if a == k:
return []
else:
return [n**a] + powers_plus(n,k,a+1)
# powers(n,k) returns a list containing the first k powers of n, starting with
# the 0th power.
# powers: Nat Nat -> (Listof Nat)
# Examples:
# powers(2,4) => [1,2,4,8]
def powers(n,k):
return powers_plus(n,k,0)
# Test1: n = 0
check.expect('Q1aT1', powers(0, 3), [1,0,0])
check.expect('Q1aT2', powers(0, 0), [])
# Test2: k = 0
check.expect('Q1aT3', powers(3, 0), [])
# Test3: k != 0 and n != 0
check.expect('Q1aT4', powers(3,4), [1,3,9,27])
# Question1(b)
# count_wins(lst1,lst2) returns the number of times that an element in the lst1
# is greater than the corresponding element in the lst2
# count_wins: (listof Nat) (listof Nat) -> Nat
# Examples:
# count_wins([1,2,0,4],[4,1,0,2]) => 2
def count_wins(lst1,lst2):
if lst1[0:] == [] and lst2[0:] == []:
return 0
elif lst1[0] > lst2[0]:
return 1 + count_wins(lst1[1:], lst2[1:])
else:
return 0 + count_wins(lst1[1:], lst2[1:])
# Test1: lst1 and lst2 equal to []
check.expect('Q1bT1', count_wins([],[]), 0)
# Test2: all lst1 elements greater than lst2
check.expect('Q1bT2', count_wins([5,6,7,8,9], [1,2,3,4,5]), 5)
# Test3: all lst2 elements greater than lst1
check.expect('Q1bT3', count_wins([1,2,3,4,5], [5,6,7,8,9]), 0)
# Test4: Regular Example
check.expect('Q1bT4', count_wins([5,2,8,4,5], [2,6,7,10,-1]), 3)
# Question1(c)
# add_lists_position(lst1, lst2, k) mutates lst1, values in lst1 from k position
# plus the corresponding values in the lst2. the function return None
# add_lists_position: (listof Int) (listof Int) Nat -> None
# Examples:
# a = [1,5,7], b = [3,4,6]
# add_lists([a],[b],1) => None
# After calling add_lists([a],[b]), a become [1,9,13]
def add_lists_position(lst1,lst2,k):
if k < len(lst1) or k < len(lst2):
lst1[k] = lst1[k] + lst2[k]
add_lists_position(lst1,lst2,k+1)
# add_lists(lst1, lst2) mutates lst1, each values in lst1 plus the corresponding
# values in the lst2. the function return None
# add_lists: (listof Int) (listof Int) -> None
# Examples:
# a = [1,5,7], b = [3,4,6]
# add_lists([a],[b]) => None
# After calling add_lists([a],[b]), a become [4,9,13]
def add_lists(lst1,lst2):
add_lists_position(lst1,lst2,0)
# Test1: Regular Test
a = [1,5,7]
b = [3,4,6]
check.expect('Q1cT1',add_lists(a,b), None)
check.expect('Q1cT1(a)',a, [4,9,13])
# Test2:
c = [-4,-2]
d = [-1,-3]
check.expect('Q1cT2',add_lists(c,d), None)
check.expect('Q1cT2(a)',c, [-5,-5])
**** a04q2.py *****************************************************************
##===============================================
## <NAME> (20557203)
## CS 116 Winter 2018
## Assignment 04, Question 2
##===============================================
import math
import check
## Question 2
# Question2(a)
# shouting(s) return True if the s contains more Capital Letters than it does
# lower case letters, else, return False
# shouting: Str -> Bool
# Examples:
# shouting("HELLO World!") => True
def shouting(s):
if len(list(filter(lambda high: high[:1].isupper(), s))) > \
len(list(filter(lambda low: low[:1].islower(), s))):
return True
else:
return False
# Test1: string is contain all same type letters
check.expect('Q2aT1', shouting("abcdefg"), False)
check.expect('Q2aT2', shouting("ASFKSD"), True)
check.expect('Q2aT3', shouting("123!@#"), False)
# Test2: Regular example
check.expect('Q2aT4', shouting("AFKLla"), True)
check.expect('Q2aT5', shouting("AFKLl123a"), True)
check.expect('Q2aT6', shouting("ALl3a"), False)
check.expect('Q2aT7', shouting("AFKL1235"), True)
# Question2(b)
# replace_fun(k, match, rep) returns rep if k is equal to match, and returns k
# itself otherwise
# replace_fun: Int Int Int -> Int
# Examples:
# replace_fun(5, 5, 3) => 3
# replace_fun(5, 6, 3) => 5
def replace_fun(k, match, rep):
if k == match:
return rep
else:
return k
# replace(lst, match, rep) returns a list with the same numbers as lst, but with
# every occurrence of match replaced by rep
# replace: (listof Int) Int Int -> (listof Int)
# Examples:
# replace([1,2,-1,2], 2, 0) => [1,0,-1,0]
def replace(lst,match,rep):
return list(map(lambda k: replace_fun(k, match, rep), lst))
# Test1: Regular Example
check.expect("Q2bT1", replace([1,2,-1,2], 2, 0), [1,0,-1,0])
check.expect("Q2bT2", replace([1,2,-1,2,4,2], 2, 5), [1,5,-1,5,4,5])
check.expect("Q2bT3", replace([2,2,2,2], 2, 0), [0,0,0,0])
check.expect("Q2bT4", replace([1,3,-1,5], 2, 0), [1,3,-1,5])
# Question2(c)
# keep_quotients(lst,n) returns a list of the quotients (element divided by n)
# for the elements of lst that are divisible by n
# keep_quotients: (listof Nat) Nat -> (listof Nat)
# Examples:
# keep_quotients([6,8,7,2], 2) => [3,4,1]
def keep_quotients(lst,n):
return list(map(lambda l: int(l/n),list(filter(lambda div_2: div_2%n == 0, lst))))
# Test
check.expect('Q2cT1', keep_quotients([6,8,7,2], 2), [3,4,1])
check.expect('Q2cT1', keep_quotients([6,8,9,2,3], 7), [])
check.expect('Q2cT1', keep_quotients([6,8,10,2], 2), [3,4,5,1])
check.expect('Q2cT1', keep_quotients([4,3,7,9,6], 3), [1,3,2])
**** a04q3.py *****************************************************************
##===============================================
## <NAME> (20557203)
## CS 116 Winter 2018
## Assignment 04, Question 3
##===============================================
import math
import check
# A MineGrid is a (listof (listof Bool))
# Requires: All lists are non-empty
# Each (listof Bool) has the same length
# note: True means mine, False means safe
# A MineBoard is a (listof (listof Str))
# Requires: Each string is either a mine ('*') hidden(' ')
# or safe (a digit between '0' and '8')
# All lists are non-empty
# Each (listof Str) has the same length
# Example board from the assignment file
grid3x3 = [[True ,False,False],
[False,False,False],
[False,False,True]]
board3x3 = [[' ', '1', '0'],
[' ', '2', '1'],
[' ', ' ', '*']]
grid4x4 = [[True,False,False,False,],
[False,False,False,False],
[False,False,True,True],
[True,False,True,False]]
board4x4 = [['','','',''],
['','','',''],
['','','',''],
['','','','']]
## Question 3
# count_mines_no(grid,row,col) returns how many mine tiles are in the 3x3
# neighbourhood of the tile at row and col in grid, including diagonals and the
# tile itself
# count_mines_no: MineGrid Nat Nat -> Nat
# requires: grid and board have the same dimensions and are consistent
# 0 <= row < height of board
# 0 <= col < width of board
# examples:
# count_mines_no(grid4x4,0,0) => 1
# count_mines_no(grid4x4,3,2) => 3
def count_mines_no(grid,row,col):
row_line = len(grid)-1
col_line = len(grid[1])
if row-1 < 0:
return len(list(filter(lambda s: s== True,grid[row][max(0,col-1):min(col_line,col+2)])))\
+ len(list(filter(lambda s: s== True,grid[min(row_line,row+1)][max(0,col-1):min(col_line,col+2)])))
elif row + 1 > row_line:
return len(list(filter(lambda s: s== True,grid[max(0,row-1)][max(0,col-1):min(col_line,col+2)])))\
+ len(list(filter(lambda s: s== True,grid[row][max(0,col-1):min(col_line,col+2)])))
else:
return len(list(filter(lambda s: s== True,grid[max(0,row-1)][max(0,col-1):min(col_line,col+2)])))\
+ len(list(filter(lambda s: s== True,grid[row][max(0,col-1):min(col_line,col+2)])))\
+ len(list(filter(lambda s: s== True,grid[min(row_line,row+1)][max(0,col-1):min(col_line,col+2)])))
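# Illustrative sketch (not part of the graded submission): the three branches
# above can be collapsed by clamping the row slice as well as the column slice
# and summing the booleans. The name count_mines_no_alt is made up here.
def count_mines_no_alt(grid, row, col):
    rows, cols = len(grid), len(grid[0])
    return sum(sum(r[max(0, col-1):min(cols, col+2)])
               for r in grid[max(0, row-1):min(rows, row+2)])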
# count_mines(grid,row,col) returns how many mine tiles are adjacent to the tile
# at row and col in grid, including diagonals (0 if that tile is itself a mine)
# count_mines: MineGrid Nat Nat -> Nat
# requires: grid and board have the same dimensions and are consistent
# 0 <= row < height of board
# 0 <= col < width of board
# examples:
# count_mines(grid4x4,0,1) => 1
# count_mines(grid4x4,3,3) => 3
# count_mines(grid4x4,3,0) => 0
def count_mines(grid,row,col):
if grid[row][col]:
return 0
else:
return count_mines_no(grid,row,col)
# Test1 for helper function: No Mine surrounded
check.expect('Q3T1H', count_mines(grid3x3,2,0), 0)
check.expect('Q3T2H', count_mines(grid4x4,0,3), 0)
# Test2 for helper function: one Mine surrounded
check.expect('Q3T3H', count_mines(grid3x3,1,0), 1)
check.expect('Q3T4H', count_mines(grid4x4,1,0), 1)
check.expect('Q3T5H', count_mines(grid4x4,2,0), 1)
# Test3 for helper function: two or more Mine surrounded
check.expect('Q3T6H', count_mines(grid3x3,1,1), 2)
check.expect('Q3T7H', count_mines(grid4x4,3,1), 3)
check.expect('Q3T8H', count_mines(grid4x4,3,3), 3)
# Test4 for helper function: on Mine
check.expect('Q3T9H', count_mines(grid4x4,0,0), 0)
check.expect('Q3T10H', count_mines(grid4x4, 2,2),0)
# reveal(grid,board, row, col) reveals the tile at the given row and col(umn)
# in board, using the mine positions from grid
# reveal: MineGrid MineBoard Nat Nat -> None
# requires: grid and board have the same dimensions and are consistent
# 0 <= row < height of board
# 0 <= col < width of board
# effects: board is mutated
# Examples:
# reveal(grid3x3, board3x3, 0,0) => None, and changes contents of board3x3
# to [['*', '1', '0'], [' ', '2', '1'], [' ', ' ', '*']]
# reveal(grid3x3, board3x3, 1,0) => None, and changes contents of board3x3
# to [[' ', '1', '0'], ['1', '2', '1'], [' ', ' ', '*']]
def reveal(grid,board,row,col):
if grid[row][col]:
board[row][col] = '*'
else:
board[row][col] = str(count_mines(grid,row,col))
# Tests:
grid3x3 = [[True ,False,False],
[False,False,False],
[False,False,True]]
board3x3 = [[' ', '1', '0'],
[' ', '2', '1'],
[' ', ' ', '*']]
# Test1: the position is mine
check.expect('Q3T1', reveal(grid3x3,board3x3,0,0), None)
check.expect('Q3T1(M)', board3x3, [['*', '1', '0'], [' ', '2', '1'],[' ', ' ', '*']])
# Test2: the position is surrounded by 1 mine
check.expect('Q3T2', reveal(grid3x3,board3x3,1,0), None)
check.expect('Q3T2(1)', board3x3, [['*', '1', '0'], ['1', '2', '1'],[' ', ' ', '*']])
check.expect('Q3T3', reveal(grid3x3,board3x3,2,1), None)
check.expect('Q3T3(1)', board3x3, [['*', '1', '0'], ['1', '2', '1'],[' ', '1', '*']])
# Test3: the position is not surrounded by mine
check.expect('Q3T4', reveal(grid3x3,board3x3,2,0), None)
check.expect('Q3T4(None)', board3x3, [['*', '1', '0'], ['1', '2', '1'],['0', '1', '*']])
**** a04q4.py *****************************************************************
##===============================================
## <NAME> (20557203)
## CS 116 Winter 2018
## Assignment 04, Question 4
##===============================================
import math
import check
# Data definition for Q3 + Q4
# A MineGrid is a (listof (listof Bool))
# Requires: All lists are non-empty
# Each (listof Bool) has the same length
# note: True means mine, False means safe
# A MineBoard is a (listof (listof Str))
# Requires: Each string is either a mine ('*') hidden(' ')
# or safe (a digit between '0' and '8')
# All lists are non-empty
# Each (listof Str) has the same length
# Example board from the assignment file
grid3x3 = [[True ,False,False],
[False,False,False],
[False,False,True]]
board3x3 = [[' ', '1', '0'],
[' ', '2', '1'],
[' ', ' ', '*']]
board3x3_win = [[' ', '1', '0'],
['1', '2', '1'],
['0', '1', ' ']]
## Question 4
# game_lost(board) returns True if board contains one or more revealed mines
# ('*'), and False otherwise
# game_lost: MineBoard -> Bool
def game_lost(board):
mined_rows = len(list(filter(lambda row: '*' in row, board)))
return mined_rows != 0
# line_be_grid_position(lst,b) mutates lst so that, starting from position b,
# each element of lst is replaced by True (if it was ' ') or False (otherwise)
# Effect: lst is mutated
# line_be_grid_position: (listof Str) Nat -> None
# Requires:
# 0 <= b < length of lst
# Example:
# if k = [' ', '1', '0'], then line_be_grid_position(k, 0) is called, k is
# now [True, False, False]
def line_be_grid_position(lst,b):
if b < len(lst):
if lst[b] == ' ':
lst[b] = True
line_be_grid_position(lst, b+1)
else:
lst[b] = False
line_be_grid_position(lst, b+1)
# line_be_grid(lst) returns lst with each element replaced by True (' ', i.e.
# hidden) or False (revealed); note that lst itself is mutated
# line_be_grid: (listof Str) -> (listof Bool)
# Example:
# if k = [' ', ' ', '0'], then line_be_grid(k) => [True, True, False]
# if k = ['2', '1', '0'], then line_be_grid(k) => [False, False, False]
def line_be_grid(lst):
line_be_grid_position(lst, 0)
return lst
# game_won(grid, board) returns True if the game has been won (all safe tiles
# are revealed and no mine tile is revealed), and False otherwise
# game_won: MineGrid MineBoard -> Bool
# Examples:
# game_won(grid3x3, board3x3) => False
# game_won(grid3x3, board3x3_win) => True
def game_won(grid,board):
k = list(map(line_be_grid, board))
if k == grid:
return True
else:
return False
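# Illustrative sketch (not part of the graded submission): line_be_grid mutates
# the rows of board as a side effect. A non-mutating version of the same check,
# with a made-up name, compares each tile directly:
def game_won_alt(grid, board):
    return all(grid[r][c] == (board[r][c] == ' ')
               for r in range(len(grid))
               for c in range(len(grid[0])))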
# Tests:
# Test1: Not reveal all the safe or even reveal a mine
board3x3_not_all_safe = [[' ', '1', '0'],
[' ', '2', '1'],
['0', '1', ' ']]
check.expect('Q4T1', game_won(grid3x3, board3x3), False)
check.expect('Q4T2', game_won(grid3x3, board3x3_not_all_safe), False)
# Test2: reveal all the safe and some mine
board3x3_mine = [['*', '1', '0'],
['1', '2', '1'],
['0', '1', ' ']]
check.expect('Q4T3', game_won(grid3x3, board3x3_mine), False)
# Test3: reveal all the safe and no mine
check.expect('Q4T4', game_won(grid3x3, board3x3_win), True)
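# Illustrative examples (not part of the graded submission) of the cases the
# feedback at the top of this file reports as untested for game_won; the
# expected values follow the behaviour of the implementation above:
check.expect('Q4T5(extra)', game_won([[True]], [[' ']]), True)    # 1x1 grid, all mines
check.expect('Q4T6(extra)', game_won([[False]], [['0']]), True)   # 1x1 grid, all safe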
**** End of graded assignment. *************************************************
```
#### File: a06-j4mai/a06-j4mai/a06q4.py
```python
import check
# Question 4 - part a
# max_height(desc) consumes a Str, desc, and returns the maximum height reached
# max_height: Str -> Nat
# Requires:
# desc is guaranteed to have the following properties:
# (1) it contains only the characters '+' and '-'.
# (2) The two characters appear the same number of times.
# (3) Every substring of desc that starts at zero has at
# least as many '+' characters as '-'.
# Examples:
testdata = '+++--+++---+-++---'
# max_height(testdata) => 4
# max_height('+-') => 1
# max_height('+++---') => 3
# max_height('+-+-+-') => 1
# max_height('') => 0
def max_height(desc):
height = 0
max_height = 0
if desc == '':
return height
for k in list(desc):
if k == '+':
height += 1
max_height = max(height, max_height)
else:
height -= 1
max_height = max(height, max_height)
return max_height
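# Illustrative alternative (not part of the submission): the running height is a
# prefix sum, so itertools.accumulate gives the same result in one expression
# (the initial= keyword needs Python 3.8+; the name max_height_alt is made up):
from itertools import accumulate
def max_height_alt(desc):
    return max(accumulate((1 if c == '+' else -1 for c in desc), initial=0))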
# Test
# Test1: Examples
check.expect('Example1', max_height(testdata), 4)
check.expect('Example2', max_height('+-'), 1)
check.expect('Example3', max_height('+++---'), 3)
check.expect('Example4', max_height('+-+-+-'), 1)
check.expect('Example5', max_height(''), 0)
# Test2: Complex height
check.expect('Complex1', max_height('+++-++-++-++-------'), 6)
check.expect('Complex2', max_height('+-+++-+---'), 3)
check.expect('Complex3', max_height('+++-++-+++------'), 6)
check.expect('Complex4', max_height('+++---+++---+++---'), 3)
# Question 4 - part b
# height(desc) consumes a Str, desc, and returns the height reached after all
# of the steps in desc have been taken.
# height: Str -> Nat
# Examples:
# height(testdata) => 0
# height('++-') => 1
# height('+++-') => 2
# height('++-+++-') => 3
def height(desc):
height = 0
max_height = 0
if desc == '':
return height
for k in list(desc):
if k == '+':
height += 1
else:
height -= 1
return height
# render_mountain(desc) consumes a Str, desc, and returns a (listof Str), which
# could be printed as the mountain
# render_mountain: Str -> (listof Str)
# Requires:
# desc is guaranteed to have the following properties:
# (1) it contains only the characters '+' and '-'.
# (2) The two characters appear the same number of times.
# (3) Every substring of desc that starts at zero has at
# least as many '+' characters as '-'.
# Examples:
# render_mountain('++-++---') => [' /\\ ', ' /\\/ \\ ', '/ \\']
def render_mountain(desc):
total_height = max_height(desc)
length = len(desc)
empty_list = []
k = 0
while k < total_height:
empty_list.append([])
z = 0
while z < length:
empty_list[k].append(' ')
z += 1
k += 1
start = 0
up = '/'
down = '\\'
while start < length:
if desc[start] == '+':
height_desc = -height(desc[:start+1])
empty_list[height_desc][start] = up
start += 1
else:
height_desc = -height(desc[:start])
empty_list[height_desc][start] = down
start += 1
for i in range(len(empty_list)):
empty_list[i] = ''.join(empty_list[i])
return empty_list
# Test:
# Test1: Sample
check.expect('simple',
render_mountain('++-++---'),
[
' /\\ ',
' /\\/ \\ ',
'/ \\'
])
# Test2: easy test
check.expect('easy test1',
render_mountain('+-'),
['/\\'])
check.expect('easy test2',
render_mountain('+-+-+-+-'),
['/\\/\\/\\/\\'])
# Test3: complex test
check.expect('complex test1',
render_mountain('+-++--+++---'),
[
' /\\ ',
' /\\ / \\ ',
'/\\/ \\/ \\'
])
check.expect('complex test2',
render_mountain('+++--+++---+-++---'),
[
' /\\ ',
' /\\ / \\ /\\ ',
' / \\/ \\/\\/ \\ ',
'/ \\'
])
```
#### File: a07-j4mai/a07-j4mai/a07q1.py
```python
def fn_a(L):
def helper(M, m):
n = 0 # O(1)
for x in M: # n cycle
if x == m: # O(1)
n = n + 1 # O(1)
return n # O(1)
L1 = list(filter(lambda x: x > helper(L, x), L)) # n* O(n) => O(n^2)
return len(L1) # O(1)
# (b)
# let n = len(s), c is a string of length 1
def fn_b(s, c):
if s[0] == c or s[-1] == c: # O(1)
print('The begins or ends with {0}'.format(c)) # O(1)
# (c)
# n is a natural number
def fn_c(n):
if n == 0: # O(1)
return 1 # O(1)
elif n % 2 == 0: # O(1)
return fn_c(n - 1) + fn_c(n - 1) ** 2 # 2T(n-1)
else:
return 2 * fn_c(n - 1) # 2T(n-1)
# (d)
# let n = len(L)
def fn_d(L, x):
for i in range(len(L)): # n cycle
j = i # O(1)
while j < len(L): # n cycle
if L[i] + L[j] == x: #O(1)
return i + j # O(1)
j = j + 1 # O(1)
return -1 # O(1)
# (e)
# let n = len(s)
def fn_e(s):
s1 = list(filter(lambda c: c.isdigit(), s)) # O(n)
s2 = list(filter(lambda c: c.isupper(), s)) # O(n)
s3 = list(filter(lambda c: c.islower(), s)) # O(n)
return s1 + s2 + s3 # O(1)
# (f)
# let n = len(L)
def fn_f(L):
def helper(M, n):
m = n // 2 # O(1)
if n >= len(M): # O(1)
return 1 # O(1)
if M[n] > M[0]: # O(1)
return M[0] + helper(M, m) # O(1) + T(n/2)
return M[0] + M[n] # O(1)
return helper(L, len(L) - 1) # T(n)
# Place one of A,B,C,D,E or F inside the string quotes;
#e.g., if you think fn_a has a running time of O(2**n),
#then change a_answer = "" to a_answer = "F".
#
# Choose:
# A. O(1)
# B. O(log n)
# C. O(n)
# D. O(n log n)
# E. O(n**2)
# F. O(2**n)
a_answer = "E"
b_answer = "A"
c_answer = "F"
d_answer = "E"
e_answer = "C"
f_answer = "B"
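# Worked recurrences for the two recursive cases (added for clarity, following
# the inline annotations above):
# fn_c: worst case T(n) = 2*T(n-1) + O(1), which unrolls to roughly 2**n
#       constant-time calls, so O(2**n) -> answer F.
# fn_f: helper is called with n, then n//2, then n//4, ..., so
#       T(n) = T(n/2) + O(1), about log2(n) steps, so O(log n) -> answer B.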
```
#### File: a07-j4mai/a07-j4mai/GRADED_ASSIGNMENT.py
```python
ASSIGNMENT 07
Student's Quest ID: j4mai
**** Testing Results **********************************************************
37/41 Total Mark
** Question 1: 6/6
** Question 2: 7/7
** Question 3: 12/16
** Question 4: 12/12
(Question 1, Test t01, 1 marks): Question 1a: Passed; passed.
(Question 1, Test t02, 1 marks): Question 1b: Passed; passed.
(Question 1, Test t03, 1 marks): Question 1c: Passed; passed.
(Question 1, Test t04, 1 marks): Question 1d: Passed; passed.
(Question 1, Test t05, 1 marks): Question 1e: Passed; passed.
(Question 1, Test t06, 1 marks): Question 1f: Passed; passed.
(Question 2, Test t01, 1 marks): simple increasing test: Passed; passed.
(Question 2, Test t02, 1 marks): simple flat and increasing test: Passed;
passed.
(Question 2, Test t03, 1 marks): decreasing test: Passed; passed.
(Question 2, Test t04, 1 marks): length of 1: Passed; passed.
(Question 2, Test t05, 1 marks): empty list: Passed; passed.
(Question 2, Test t06, 1 marks): short list and more than one increasing se-
quences: Passed; passed.
(Question 2, Test t07, 1 marks): long list and more than one increasing se-
quences: Passed; passed.
(Question 3, Test t01, 1 marks): simple tests:one binary search: Passed;
passed.
(Question 3, Test t02, 1 marks): simple tests:no binary search: Passed;
passed.
(Question 3, Test t03, 1 marks): simple tests:not exist:binary search: FAILED;
FAILED: got (['Galloping search from index 0', 'Galloping search from in-
dex 1', 'Galloping search from index 3'], False) expected (['Galloping
search from index 0', 'Galloping search from index 1', 'Galloping search
from index 3', 'Binary search from index 2 to 2'], False)
(Question 3, Test t04, 1 marks): longer list:binary search: Passed; passed.
(Question 3, Test t05, 1 marks): longer list:not exist;last elemnt: Passed;
passed.
(Question 3, Test t06, 1 marks): longer list:not exist;one binary search:
FAILED; FAILED: got (['Galloping search from index 0', 'Galloping search
from index 1', 'Galloping search from index 3', 'Galloping search from in-
dex 7', 'Galloping search from index 9'], False) expected (['Galloping
search from index 0', 'Galloping search from index 1', 'Galloping search
from index 3', 'Galloping search from index 7', 'Galloping search from in-
dex 9', 'Binary search from index 8 to 8'], False)
(Question 3, Test t07, 1 marks): longer list:one binary search: FAILED;
FAILED: got (['Galloping search from index 0', 'Galloping search from in-
dex 1', 'Galloping search from index 3', 'Galloping search from index 7',
'Binary search from index 8 to 9'], 8) expected (['Galloping search from
index 0', 'Galloping search from index 1', 'Galloping search from index
3', 'Galloping search from index 7', 'Galloping search from index 9', 'Bi-
nary search from index 8 to 8'], 8)
(Question 3, Test t08, 1 marks): length of list is 1,exist: Passed; passed.
(Question 3, Test t09, 1 marks): length of list is 1,not exist: Passed;
passed.
(Question 3, Test t10, 1 marks): length of list is 2,not exist: Passed;
passed.
(Question 3, Test t11, 1 marks): length of list is 3,not exist: Passed;
passed.
(Question 3, Test t12, 1 marks): length of list is 3,exist: Passed; passed.
(Question 3, Test t13, 1 marks): long list;binary search on the first half:
Passed; passed.
(Question 3, Test t14, 1 marks): long list;binary search on the latter half:
FAILED; FAILED: got (['Galloping search from index 0', 'Galloping search
from index 1', 'Galloping search from index 3', 'Galloping search from in-
dex 7', 'Binary search from index 8 to 13'], 12) expected (['Galloping
search from index 0', 'Galloping search from index 1', 'Galloping search
from index 3', 'Galloping search from index 7', 'Galloping search from in-
dex 14', 'Binary search from index 8 to 13'], 12)
(Question 3, Test t15, 1 marks): first element is greater than the target:
Passed; passed.
(Question 3, Test t16, 1 marks): longer list:not exist;no binary search:
Passed; passed.
(Question 4, Test t01, 1 marks): part a:empty list: Passed; passed.
(Question 4, Test t02, 1 marks): part a: only one element in the lst: Passed;
passed.
(Question 4, Test t03, 1 marks): part a: first lst is empty but the other lsts
are not: Passed; passed.
(Question 4, Test t04, 1 marks): part a: last lst is empty but the other lsts
are not: Passed; passed.
(Question 4, Test t05, 1 marks): part a: last lst is empty but the other lsts
are not: Passed; passed.
(Question 4, Test t06, 1 marks): part a: hard test long list and large number:
Passed; passed.
(Question 4, Test t07, 1 marks): part b: length of list is 0: Passed; passed.
(Question 4, Test t08, 1 marks): part b: length of list is 1: Passed; passed.
(Question 4, Test t09, 1 marks): part b: length of list is 2: Passed; passed.
(Question 4, Test t10, 1 marks): part b: length of list is 3: Passed; passed.
(Question 4, Test t11, 1 marks): part b: length of list is 5: Passed; passed.
(Question 4, Test t12, 1 marks): part b: long list: Passed; passed.
**** testing_result.txt *****************************************************************
Total number of tests missing: 3
count_longest_asc: 1 of the 6 required cases were missing:
- len(L) > 1 and list contains only one number repeated
galloping_search: All 4 required cases were tested.
merge3: 2 of the 5 required cases were missing:
- L1 empty, L2 & L3 non-empty
- L2 empty, L1 & L3 non-empty
mergesort3: All 5 required cases were tested.
**** a07q1.py *****************************************************************
##===============================================
## <NAME> (20557203)
## CS 116 Winter 2018
## Assignment 07, Question 1
##===============================================
## Make sure to follow question 1 as directed.
# Question 1.
#
# Determine the worst-case runtime of the following functions.
# The answer will be stated in terms of the size of the problem.
# Some bounds may appear more than once.
#
# Note. In all cases, choose the 'tightest' bound.
#
# Choose
# A. O(1)
# B. O(log n)
# C. O(n)
# D. O(n log n)
# E. O(n**2)
# F. O(2**n)
# (a)
# Let n = len(L)
def fn_a(L):
def helper(M, m):
n = 0 # O(1)
for x in M: # n cycle
if x == m: # O(1)
n = n + 1 # O(1)
return n # O(1)
L1 = list(filter(lambda x: x > helper(L, x), L)) # n* O(n) => O(n^2)
return len(L1) # O(1)
# (b)
# let n = len(s), c is a string of length 1
def fn_b(s, c):
if s[0] == c or s[-1] == c: # O(1)
print('The begins or ends with {0}'.format(c)) # O(1)
# (c)
# n is a natural number
def fn_c(n):
if n == 0: # O(1)
return 1 # O(1)
elif n % 2 == 0: # O(1)
return fn_c(n - 1) + fn_c(n - 1) ** 2 # 2T(n-1)
else:
return 2 * fn_c(n - 1) # 2T(n-1)
# (d)
# let n = len(L)
def fn_d(L, x):
for i in range(len(L)): # n cycle
j = i # O(1)
while j < len(L): # n cycle
if L[i] + L[j] == x: #O(1)
return i + j # O(1)
j = j + 1 # O(1)
return -1 # O(1)
# (e)
# let n = len(s)
def fn_e(s):
s1 = list(filter(lambda c: c.isdigit(), s)) # O(n)
s2 = list(filter(lambda c: c.isupper(), s)) # O(n)
s3 = list(filter(lambda c: c.islower(), s)) # O(n)
return s1 + s2 + s3 # O(1)
# (f)
# let n = len(L)
def fn_f(L):
def helper(M, n):
m = n // 2 # O(1)
if n >= len(M): # O(1)
return 1 # O(1)
if M[n] > M[0]: # O(1)
return M[0] + helper(M, m) # O(1) + T(n/2)
return M[0] + M[n] # O(1)
return helper(L, len(L) - 1) # T(n)
# Place one of A,B,C,D,E or F inside the string quotes;
#e.g., if you think fn_a has a running time of O(2**n),
#then change a_answer = "" to a_answer = "F".
#
# Choose:
# A. O(1)
# B. O(log n)
# C. O(n)
# D. O(n log n)
# E. O(n**2)
# F. O(2**n)
a_answer = "E"
b_answer = "A"
c_answer = "F"
d_answer = "E"
e_answer = "C"
f_answer = "B"
**** a07q2.py *****************************************************************
##===============================================
## <NAME> (20557203)
## CS 116 Winter 2018
## Assignment 07, Question 2
##===============================================
import check
# Question 2
# count_longest_asc(L) consumes a list of integers, L, and returns the length
# of the longest run of ascending numbers in the list
# count_longest_asc: (listof Int) -> Int
# Requires:
# function cannot use recursion
# function must go through the list exactly once
# cannot create any new lists
# Examples:
# count_longest_asc([9, 1, 7, 8, 9, 4, 2]) => 4
# count_longest_asc([6, 4, 4, 2]) => 1
def count_longest_asc(L):
if L == []:
return 0
if len(L) == 1:
return 1
i = 0
j = 1
k = []
while i+1 < len(L):
if L[i] < L[min(i+1, len(L))]:
i += 1
j += 1
k.append(j)
else:
i += 1
j = 1
k.append(j)
return max(k)
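# Illustrative sketch (not part of the graded submission): the version above
# collects run lengths in a list k, although the stated requirement forbids
# creating new lists. Tracking only the current and best run lengths avoids
# that; the name count_longest_asc_alt is made up:
def count_longest_asc_alt(L):
    if L == []:
        return 0
    best = 1
    run = 1
    for i in range(1, len(L)):
        if L[i-1] < L[i]:
            run += 1
        else:
            run = 1
        best = max(best, run)
    return best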
# Test:
# Test1: Examples
l = [9,1,7,8,9,4,2]
check.expect('E1', count_longest_asc(l), 4)
check.expect('E2', count_longest_asc([6,4,4,2]), 1)
# Test2: Only increasing list
check.expect('Q2T1', count_longest_asc([1,3,4,5,6]), 5)
check.expect('Q2T2', count_longest_asc([1,6]), 2)
check.expect('Q2T3', count_longest_asc([1,2,3,4,5,6,8,100]), 8)
# Test3: Only one element in list
check.expect('Q2T4', count_longest_asc([1]), 1)
check.expect('Q2T5', count_longest_asc([]), 0)
check.expect('Q2T6', count_longest_asc([96]), 1)
# Test4: Only decreasing list
check.expect('Q2T7', count_longest_asc([9,8,7,6]), 1)
check.expect('Q2T8', count_longest_asc([15,9,5,3,2,1]), 1)
check.expect('Q2T9', count_longest_asc([10,9,8,7,6,5,4,3,2,1,0]), 1)
# Test5: Mix list
check.expect('Q2T10', count_longest_asc([1,3,4,5,3,4,2,8,6]), 4)
check.expect('Q2T11', count_longest_asc([1,3,4,5,6,2,1,3,4,5]), 5)
check.expect('Q2T12', count_longest_asc([1,3,2,4,8,5,6]), 3)
**** a07q3.py *****************************************************************
##===============================================
## <NAME> (20557203)
## CS 116 Winter 2018
## Assignment 07, Question 3
##===============================================
import check
# Question 3
# Note: For *the binary_search function only* you can change
# the design recipe and implementation as required (you will need to)
# in order to adapt the function for your solution to Q3
# binary_search(L, target) returns True if target is
# in L and False otherwise
# binary_search: (listof Int) Int -> Bool
def binary_search(L, target):
beginning = 0
end = len(L) - 1
while beginning <= end:
middle = beginning + (end - beginning) // 2
if L[middle] == target:
return True
elif L[middle] > target:
end = middle - 1
else:
beginning = middle + 1
return False
# galloping_search(n, L): consumes a number, n, and a non-empty sorted list of
# natural numbers, L, and returns the index of n in L, or False if n is not
# present; it also prints a message for each index a search starts from
# galloping_search: Int (listof Int) -> False or Int
# Examples:
# galloping_search(14, [1, 2, 5, 7, 9, 14, 15, 23, 29]) => 5
# and prints 'Galloping search from index {}', 4 times, once per line.
# and prints 'Binary search from index 4 to 6' once at the end
def galloping_search(n, L):
start = 0
length_L = len(L)
k = []
while 2**start <= length_L:
if L[2**start-1] < n:
print('Galloping search from index {}'.format(2**start-1))
start += 1
k.append(start)
else:
print('Galloping search from index {}'.format(2**start-1))
start += 1
k.append(start)
start += 1000
if len(k) > 1:
last = 2**(k[-1]-1)-1
sec_last = 2 ** (k[-2]-1)-1
else:
last = 0
sec_last = 0
if binary_search(L, n):
pos = L.index(n)
if pos < last:
print('Binary search from index {} to {}'.format(sec_last+1, last-1))
elif pos == last:
None
elif pos < length_L-1:
print('Binary search from index {} to {}'.format(last+1, pos+1))
else:
print('Galloping search from index {}'.format(length_L-1))
return pos
else:
if L[last] > n:
None
elif L[-1] > n:
print('Galloping search from index {}'.format(length_L-1))
else:
print('Galloping search from index {}'.format(length_L-1))
return False
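# Illustrative sketch (not part of the graded submission): the usual galloping
# (exponential) search pattern doubles a probe index until the probed value
# reaches the target, then binary-searches the last interval. This sketch does
# not reproduce the exact trace the autograder checks for; names are made up.
def galloping_search_alt(n, L):
    bound = 1
    while bound < len(L) and L[bound] < n:
        bound *= 2
    lo, hi = bound // 2, min(bound, len(L) - 1)
    while lo <= hi:
        mid = (lo + hi) // 2
        if L[mid] == n:
            return mid
        elif L[mid] < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False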
galloping_search(5, [1,2,3,4,5,6,7,8])
# Test:
# Test1: n in the L
check.set_screen('Galloping search 0,1,3,7 Binary search 4 to 6')
check.expect('T1', galloping_search(5, [1,2,3,4,5,6,7,8]), 4)
check.set_screen('Galloping search 0')
check.expect('T2', galloping_search(1, [1,2,3,4,5,6,7,8,9]), 0)
check.set_screen('Galloping search 0,1,3,7 Binary search 4 to 6')
check.expect('T3', galloping_search(7, [1,2,3,4,5,6,7,8,9]), 6)
check.set_screen('Galloping search 0,1,3,7')
check.expect('T4', galloping_search(8, [1,2,3,4,5,6,7,8,9]), 7)
check.set_screen('Galloping search 0,1,3,7,8')
check.expect('T5', galloping_search(9, [1,2,3,4,5,6,7,8,9]), 8)
# Test2: n not in the L
check.set_screen('Galloping search 0,1,3,7')
check.expect('T6', galloping_search(12, [1,3,5,7,9,11,13,15,17]), False)
check.set_screen('Galloping search 0,1,3,7,8')
check.expect('T7', galloping_search(16, [1,3,5,7,9,11,13,15,17]), False)
check.set_screen('Galloping search 0,1,3')
check.expect('T8', galloping_search(6, [1,3,5,7,9,11,13,15,17]), False)
check.set_screen('Galloping search 0,1,3')
check.expect('T9', galloping_search(4, [1,3,5,7,9,11,13,15,17]), False)
check.set_screen('Galloping search 0,1,3,7,8')
check.expect('T10', galloping_search(18, [1,3,5,7,9,11,13,15,17]), False)
# Test3: Examples on Piazza
check.set_screen('Galloping search 0')
check.expect('T11', galloping_search(1, [2,3,4,5,6]), False)
check.set_screen('Galloping search 0,1')
check.expect('T12', galloping_search(3, [2,4,5,6,7]), False)
check.set_screen('Galloping search 0,1,2')
check.expect('T13', galloping_search(3, [1,2,4]), False)
check.set_screen('Galloping search 0,1,3,7,8')
check.expect('T14', galloping_search(8, [0,1,2,3,4,5,6,7,9]), False)
check.set_screen('Galloping search 0,1,3,4')
check.expect('T15', galloping_search(5, [0,1,2,3,4]), False)
check.set_screen('Galloping search 0,1,3,7,9, Binary search 8 to 8')
check.expect('T16', galloping_search(10, [1,2,3,4,5,6,7,8,9,100]), False)
check.set_screen('Galloping search 0,1,3,7, Binary search 8 to 13')
check.expect('T17', galloping_search(13, [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]), 12)
**** a07q4.py *****************************************************************
##===============================================
## <NAME> (20557203)
## CS 116 Winter 2018
## Assignment 07, Question 4
##===============================================
import math
import check
# Question 4 - part a
# helper_merge3(L1,L2) consumes two sorted lists of natural numbers, L1 & L2,
# and returns a new list containing their elements merged in ascending order
# helper_merge3: (listof Nat) (listof Nat) -> (listof Nat)
# Examples:
# helper_merge3([1,2],[5,8]) => [1,2,5,8]
def helper_merge3(L1,L2):
L = L1 + L2
i = 0
j = 0
index_L = 0
while i < len(L1) and j < len(L2):
if L1[i] < L2[j]:
L[index_L] = L1[i]
i += 1
else:
L[index_L] = L2[j]
j += 1
index_L += 1
while i < len(L1):
L[index_L] = L1[i]
i += 1
index_L += 1
while j < len(L2):
L[index_L] = L2[j]
j += 1
index_L += 1
return L
# merge3(L1,L2,L3) consumes three sorted lists of natural numbers, L1, L2 & L3,
# and returns a new list containing their elements merged in ascending order
# merge3: (listof Nat) (listof Nat) (listof Nat) -> (listof Nat)
# Examples:
# merge3([1, 5], [2, 7], [3, 4, 9]) =>[1, 2, 3, 4, 5, 7, 9]
def merge3(L1, L2, L3):
return helper_merge3(helper_merge3(L1,L2),L3)
# Test:
# Test1: Examples Test
check.expect('PartaT1', merge3([1,5], [2,7], [3,4,9]), [1,2,3,4,5,7,9])
# Test2: Non empty order
check.expect('PartaT2', merge3([1,7],[4,5,6], [8,9,16]),[1,4,5,6,7,8,9,16])
check.expect('PartaT3', merge3([1,9],[2,2,2], [9,9,9]),[1,2,2,2,9,9,9,9])
check.expect('PartaT4', merge3([2,13],[2,12],[2,11]),[2,2,2,11,12,13])
check.expect('PartaT5', merge3([3,10], [2,9], [1, 6]), [1,2,3,6,9,10])
# Test3: empty order
check.expect('PartaT6', merge3([1,7],[4,5,6],[]),[1,4,5,6,7])
check.expect('PartaT7', merge3([], [],[1,2]),[1,2])
check.expect('PartaT8', merge3([],[], []),[])
# Question 4 - part b
# mergesort3(L): consumes a list of natural numbers, L, and returns a new list
# with the same numbers in ascending order
# mergesort3: (listof Nat) -> (listof Nat)
# Examples:
# mergesort3([2, 6, 1, 9, 3]) => [1, 2, 3, 6, 9]
def mergesort3(L):
if len(L) > 1:
first = math.ceil(len(L)/3)
second = 2* first
L1 = L[0:first]
L2 = L[first: second]
L3 = L[second:]
L1 = mergesort3(L1)
L2 = mergesort3(L2)
L3 = mergesort3(L3)
return merge3(L1,L2,L3)
else:
return L
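# Note on running time (added for clarity): splitting into three parts gives the
# recurrence T(n) = 3*T(n/3) + O(n) for the merge step, which solves to
# O(n log n), the same asymptotic class as ordinary two-way mergesort.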
# Test
# Test1: ascending order
check.expect('PartbT1', mergesort3([1,2,3,4,5,6,7,8,9]), [1,2,3,4,5,6,7,8,9])
check.expect('PartbT2', mergesort3([1,3,5,7,9,11,13,15,17]), [1,3,5,7,9,11,13,15,17])
check.expect('PartbT3', mergesort3([2,4,6,8,10,22,30,31,40]), [2,4,6,8,10,22,30,31,40])
# Test2: decreasing order
check.expect('PartbT4', mergesort3([9,8,7,6,5,4,3,2,1]), [1,2,3,4,5,6,7,8,9])
check.expect('PartbT5', mergesort3([17,15,13,11,9,7,5,3,1]), [1,3,5,7,9,11,13,15,17])
check.expect('PartbT6', mergesort3([40,31,30,22,10,8,6,4,2]), [2,4,6,8,10,22,30,31,40])
# Test3: Chaos order
check.expect('PartbT7', mergesort3([2,4,1,8,3,0,5,2,3]), [0,1,2,2,3,3,4,5,8])
check.expect('PartbT8', mergesort3([2,3,1]), [1,2,3])
check.expect('PartbT9', mergesort3([88,33,99,11,44,22,0]), [0,11,22,33,44,88,99])
# Test4: Empty or len(L) < 3
check.expect('PartbT10', mergesort3([]), [])
check.expect('PartbT11', mergesort3([7,4]), [4,7])
check.expect('PartbT12', mergesort3([1]), [1])
**** End of graded assignment. *************************************************
``` |
{
"source": "JohnMai1994/cs234-2018_Spring_Term",
"score": 3
} |
#### File: CS234_code/A3/set.py
```python
INIT_SIZE = 16
ALPHA_TRIG = 2/3
class Set:
# Set() returns an empty Set
# __init__: Set -> None
def __init__(self):
self.count = 0
self.size = 16
self.alpha_trig = 2/3
self.capacity = [None] * self.size
# item in self returns True if item is a member of the set Self, else False
# __contains__: Set Any -> Bool
def __contains__(self, item):
if self.count == 0:
return False
for i in self.capacity:
if i == item:
return True
return False
# len(self) returns the number of values currently in self
# __len__: Set -> Nat
def __len__(self):
length = 0
for i in self.capacity:
if (i != None) :
length = length + 1
return length
# add(self, v) adds the number v to self, resizing the table when the load
# factor exceeds 2/3
# add: Set Nat -> None
def add(self,v):
pos = hash(v) % self.size
if (self.capacity[pos] == v):
return None
while (self.capacity[pos] != None):
pos = hash(pos) % self.size + 1
self.capacity[pos] = v
self.count = self.count + 1
if (self.count >= self.alpha_trig * self.size):
self.size = (2 * self.size)
new_list = [None] * self.size
for i in self.capacity:
if i != None:
new_pos = hash(i) % self.size
while (new_list[pos] != None):
new_pos = hash(new_pos) % self.size + 1
new_list[new_pos] = i
self.capacity = new_list
# remove(self, v) remove the number v from self by consuming self and v
# remove: Set Nat -> None
def remove(self,v):
pos = hash(v)% self.size
while (self.capacity[pos] != v):
if self.capacity[pos] == None:
raise ValueError("Can't find the value")
pos = hash(pos) % self.size + 1
self.capacity[pos] = None
# union(self, other) returns a new set containing every element that is in
# self or in other (the union of the two sets)
# union: Set Set -> Set
# Require:
# self and other should be Set or None
def union(self,other):
new_union = Set()
for i in other.capacity: # first loop to put the element in other into
new_union.add(i) # the set of new_union O(m)
for k in self.capacity: # second loop to put the element in self into
if k not in new_union.capacity: # the set of new_union O(n)
new_union.add(k)
return new_union
# intersection(self, other) returns a new set containing the elements that
# are in both self and other
# intersection: Set Set -> Set
# Require:
# self and other should be Set or None
def intersection(self,other):
new_intersection = Set()
for i in self.capacity:
if i in other.capacity:
new_intersection.add(i)
return new_intersection
# subtract(self, other) returns a new set containing the elements that are in
# self but not in other
# subtract: Set Set -> Set
# Require:
# self and other should be Set or None
def subtract(self,other):
new_subtract = Set()
for i in self.capacity: # loop for all element in self: O(n)
if i not in other.capacity: # if statement, which is O(1)
new_subtract.add(i) # add operation O(1)
return new_subtract
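# Illustrative sketch (not part of the original file): in Set.add above, the
# probe step hash(pos) % self.size + 1 can step one slot past the end of the
# table, and the resize loop tests new_list[pos] rather than new_list[new_pos].
# Conventional linear probing wraps with modulo and stops on an empty slot or a
# match; the helper name below is made up:
def _linear_probe_insert(table, v):
    pos = hash(v) % len(table)
    while table[pos] is not None and table[pos] != v:
        pos = (pos + 1) % len(table)
    table[pos] = v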
a = Set()
a.add(5)
a.add(1)
a.add(3)
a.add(2)
a.add(6)
a.add(7)
a.add(8)
a.add(9)
a.add(10)
a.add(11)
a.add(22)
a.add(13)
``` |
{
"source": "JohnMalmberg/daos",
"score": 2
} |
#### File: ftest/container/GlobalHandle.py
```python
import ctypes
import os
import time
import traceback
import sys
import json
from multiprocessing import Process, sharedctypes
from avocado import Test
sys.path.append('./util')
sys.path.append('../util')
sys.path.append('../../../utils/py')
sys.path.append('./../../utils/py')
import ServerUtils
import WriteHostFile
import CheckForPool
import daos_api
import daos_cref
from daos_api import DaosContext
from daos_api import DaosPool
from daos_api import DaosContainer
from daos_cref import *
def CheckHandle(pool_glob_handle, uuidstr, cont_glob_handle, rank):
"""
This gets run in a child process and verifies that the global
handles can be turned into local handles in another process.
"""
try:
# get paths from the build_vars generated by build
with open('../../../.build_vars.json') as f:
build_paths = json.load(f)
# setup the DAOS python API in this process
context = DaosContext(build_paths['PREFIX'] + '/lib/')
# setup the pool and connect using global handle
pool = DaosPool(context)
pool.uuid = uuidstr
pool.set_svc(rank)
pool.group = "daos_server"
buf = ctypes.cast(pool_glob_handle.iov_buf,
ctypes.POINTER(ctypes.c_byte * pool_glob_handle.iov_buf_len))
buf2 = bytearray()
buf2.extend(buf.contents)
pool_handle = pool.global2local(context,
pool_glob_handle.iov_len,
pool_glob_handle.iov_buf_len,
buf2)
# perform an operation that will use the new handle, if it
# doesn't throw an exception, then all is well.
pool.pool_query()
# setup the container and then connect using the global handle
container = DaosContainer(context)
container.poh = pool_handle
buf = ctypes.cast(cont_glob_handle.iov_buf,
ctypes.POINTER(ctypes.c_byte *
cont_glob_handle.iov_buf_len))
buf2 = bytearray()
buf2.extend(buf.contents)
cont_handle = container.global2local(context,
cont_glob_handle.iov_len,
cont_glob_handle.iov_buf_len,
buf2)
# just try one thing to make sure handle is good
container.query()
except ValueError as e:
print(e)
print(traceback.format_exc())
raise
return
class GlobalHandle(Test):
"""
This class contains tests to verify the ability to share container
handles among processes.
"""
def setUp(self):
# get paths from the build_vars generated by build
with open('../../../.build_vars.json') as f:
self.build_paths = json.load(f)
# setup the DAOS python API
self.Context = DaosContext(self.build_paths['PREFIX'] + '/lib/')
server_group = self.params.get("server_group",'/server/',
'daos_server')
basepath = os.path.normpath(self.build_paths['PREFIX'] + "/../")
tmp = self.build_paths['PREFIX'] + '/tmp'
self.hostlist = self.params.get("test_machines",'/run/hosts/')
self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, tmp)
ServerUtils.runServer(self.hostfile, server_group, basepath)
time.sleep(2)
def tearDown(self):
ServerUtils.stopServer()
os.remove(self.hostfile)
# really make sure everything is gone
CheckForPool.CleanupPools(self.hostlist)
ServerUtils.killServer(self.hostlist)
def test_global_handle(self):
"""
Test ID: DAO
Test Description: Use a pool handle in another process.
:avocado: tags=container,conthandle,vm,small,regression
"""
try:
# use the uid/gid of the user running the test, these should
# be perfectly valid
createuid = os.geteuid()
creategid = os.getegid()
# parameters used in pool create that are in yaml
createmode = self.params.get("mode",'/run/testparams/createmode/')
createsetid = self.params.get("setname",
'/run/testparams/createset/')
createsize = self.params.get("size",'/run/testparams/createsize/')
# initialize a python pool object then create the underlying
# daos storage
pool = DaosPool(self.Context)
pool.create(createmode, createuid, creategid,
createsize, createsetid, None)
pool.connect(1 << 1)
# create a pool global handle
iov_len, buf_len, buf = pool.local2global()
buftype = ctypes.c_byte * buf_len
c_buf = buftype.from_buffer(buf)
sct_pool_handle = sharedctypes.RawValue(IOV,
ctypes.cast(c_buf, ctypes.c_void_p),
buf_len,
iov_len)
# create a container
container = DaosContainer(self.Context)
container.create(pool.handle)
container.open()
# create a container global handle
iov_len, buf_len, buf = container.local2global()
buftype = ctypes.c_byte * buf_len
c_buf = buftype.from_buffer(buf)
sct_cont_handle = sharedctypes.RawValue(IOV,
ctypes.cast(c_buf, ctypes.c_void_p),
buf_len,
iov_len)
sct_pool_uuid = sharedctypes.RawArray(ctypes.c_byte, pool.uuid)
# this should work in the future but need on-line server addition
#arg_list = (
#p = Process(target=CheckHandle, args=arg_list)
#p.start()
#p.join()
# for now verifying global handle in the same process which is not
# the intended use case
CheckHandle(sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0)
except ValueError as e:
print(e)
print(traceback.format_exc())
self.fail("Expecting to pass but test has failed.\n")
```
#### File: ftest/util/ServerUtils.py
```python
import os
import time
import subprocess
import json
import re
import time
import aexpect
from avocado.utils import genio
sessions = {}
class ServerFailed(Exception):
""" Server didn't start/stop properly. """
# a callback function used when there is cmd line I/O, not intended
# to be used outside of this file
def printFunc(thestring):
print "<SERVER>" + thestring
def runServer(hostfile, setname, basepath):
"""
Launches DAOS servers in accordance with the supplied hostfile.
"""
global sessions
try:
server_count = len(genio.read_all_lines(hostfile))
# pile of build time variables
with open(os.path.join(basepath, ".build_vars.json")) as json_vars:
build_vars = json.load(json_vars)
orterun_bin = os.path.join(build_vars["OMPI_PREFIX"], "bin/orterun")
daos_srv_bin = os.path.join(build_vars["PREFIX"], "bin/daos_server")
ld_lib_path = os.path.join(build_vars["PREFIX"], "lib") + os.pathsep + \
os.path.join(build_vars["PREFIX"], "lib/daos_srv")
env_vars = ['CRT_.*', 'DAOS_.*', 'ABT_.*', 'DD_(STDERR|LOG)', 'D_LOG_.*',
'OFI_.*']
env_args = ""
for env_var in os.environ.items():
for pat in env_vars:
if not re.match(pat, env_var[0]):
continue
env_args += "-x {0}=\"{1}\" ".format(env_var[0], env_var[1])  # env_var is a (name, value) pair
initial_cmd = "/bin/sh"
server_cmd = orterun_bin + " --np {0} ".format(server_count)
server_cmd += "--hostfile {0} --enable-recovery ".format(hostfile)
server_cmd += env_args
server_cmd += "-x DD_SUBSYS=all -x DD_MASK=all "
server_cmd += "-x LD_LIBRARY_PATH={0} ".format(ld_lib_path)
server_cmd += daos_srv_bin + " -g {0} -c 1 ".format(setname)
server_cmd += " -a" + basepath + "/install/tmp/"
print "Start CMD>>>>{0}".format(server_cmd)
sessions[setname] = aexpect.ShellSession(initial_cmd)
if sessions[setname].is_responsive():
sessions[setname].sendline(server_cmd)
timeout = 300
start_time = time.time()
result = 0
expected_data = "Starting Servers\n"
while True:
pattern = "DAOS server"
output = sessions[setname].read_nonblocking(2, 2)
match = re.findall(pattern, output)
expected_data = expected_data + output
result += len(match)
if result == server_count or time.time() - start_time > timeout:
print ("<SERVER>: {}".format(expected_data))
if result != server_count:
raise ServerFailed("Server didn't start!")
break
print "<SERVER> server started and took %s seconds to start" % \
(time.time() - start_time)
except Exception as e:
print "<SERVER> Exception occurred: {0}".format(str(e))
raise ServerFailed("Server didn't start!")
def stopServer(setname=None):
"""
orterun says that if you send a ctrl-c to it, it will
initiate an orderly shutdown of all the processes it
has spawned. Doesn't always work though.
"""
global sessions
try:
if setname == None:
for k, v in sessions.items():
v.sendcontrol("c")
v.sendcontrol("c")
v.close()
else:
sessions[setname].sendcontrol("c")
sessions[setname].sendcontrol("c")
sessions[setname].close()
print "<SERVER> server stopped"
except Exception as e:
print "<SERVER> Exception occurred: {0}".format(str(e))
raise ServerFailed("Server didn't stop!")
def killServer(hosts):
"""
Sometimes stop doesn't get everything. Really whack everything
with this.
hosts -- list of host names where servers are running
"""
kill_cmds = ["pkill daos_server --signal 9",
"pkill daos_io_server --signal 9"]
for host in hosts:
for cmd in kill_cmds:
resp = subprocess.call(["ssh", host, cmd])
``` |
{
"source": "JohnMalmsteen/python-countdown-cheatbot",
"score": 3
} |
#### File: JohnMalmsteen/python-countdown-cheatbot/countdown_solver.py
```python
import dictionary
import sys, getopt
def main(argv):
try:
opts, args = getopt.getopt(argv, "", ["word="])
result = dictionary.find_largest_anagram(args.pop())
print result
print dictionary.getDefnition(result.pop())
except getopt.GetoptError:
print 'countdown_solver.py <letters>'
sys.exit(2)
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: JohnMalmsteen/python-countdown-cheatbot/dictionary.py
```python
import letter_generation
import itertools
dictionary = dict()
##
## Leaving this stuff in to show how the masterlist.txt was compiled
## added bonus of not having to do all the checking when parsing that file
##
#filter = set()
#words = set()
#filter_file = open('words/wordslist.txt', 'r')
#for line in filter_file:
# if line[0].isupper():
# line = line.strip().lower()
# filter.add(line)
#filter_file.close()
#file = open('words/354984si.ngl', 'r')
#for line in file:
# line = line.strip()
# if len(line) > 3 and len(line) < 10 and line.isalpha() and line not in filter:
# myKey = ''.join(sorted(line))
# words.add(line)
# if dictionary.has_key(myKey):
# dictionary[myKey].add(line)
# else:
# dictionary[myKey] = set([line])
#file.close()
#outfile = open('words/masterlist.txt', 'w')
#for elem in words:
# outfile.write(elem + '\n')
#file = open('words/masterlist.txt', 'r')
#for line in file:
# line = line.strip()
# myKey = ''.join(sorted(line))
# if dictionary.has_key(myKey):
# dictionary[myKey].add(line)
# else:
# dictionary[myKey] = set([line])
file = open('words/masterlist.txt', 'r')
for line in file:
line = line.strip()
myKey = ''.join(sorted(line))
if dictionary.has_key(myKey):
dictionary[myKey].add(line)
else:
dictionary[myKey] = set([line])
file.close()
## I tested this with the dictionary.get(key, default=None) approach, but it
## worked out to be considerably slower than this version; I assume the
## comparison to None in the receiving function was slowing it down
def checkword(inputword):
if dictionary.has_key(''.join(sorted(inputword))):
return dictionary[''.join(sorted(inputword))]
else:
return False
def find_largest_anagram(inputword):
##print "Input Letters: " + inputword
for index in reversed(range(4,len(inputword)+1)):
combos = itertools.combinations(inputword, index)
for combo in combos:
result = checkword(''.join(combo))
if result != False:
return result
return set(["No Anagrams Found"])
def getDefnition(word):
if(word.startswith("No Anagrams")):
return ""
import urllib
import json
url = "http://dictionaryapi.net/api/definition/%s" % word
data = json.load(urllib.urlopen(url))
try:
result = str(data[0]['Definitions'])
result = "Definition: " + result[3:-2]
return result
except:
return "No definition found for '%s'" % word
#highest = find_largest_anagram(letter_generation.getLettersArray())
#print "Highest Anagram(s): " + str(highest)
#strhighest = highest.pop()
#print getDefnition(strhighest)
s = """\
from __main__ import find_largest_anagram
from __main__ import letter_generation
"""
if __name__ == '__main__':
import timeit
print (timeit.timeit("find_largest_anagram(letter_generation.getLettersArray())", setup=s, number=10000 ))
``` |
{
"source": "JohnMannion51/emerg-tech-assignment",
"score": 3
} |
#### File: JohnMannion51/emerg-tech-assignment/digitReq.py
```python
import gzip
import os.path
import tkinter as tk
from random import randint
from tkinter import filedialog
import keras as kr
import matplotlib.pyplot as plt
import numpy as np
import sklearn.preprocessing as pre
from keras.preprocessing import image
model = kr.models.Sequential()
# Add a hidden layer with 1000 neurons and an input layer with 784.
model.add(kr.layers.Dense(units=1000, activation='relu', input_dim=784))
model.add(kr.layers.Dense(units=1000, activation='relu'))
model.add(kr.layers.Dense(units=1000, activation='relu'))
model.add(kr.layers.Dense(units=1000, activation='relu'))
# Add a 10 neuron output layer.
model.add(kr.layers.Dense(units=10, activation='softmax'))
# Build the graph.
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# Unzips the files and reads in as bytes
with gzip.open('data/train-images-idx3-ubyte.gz', 'rb') as f:
train_img = f.read()
with gzip.open('data/train-labels-idx1-ubyte.gz', 'rb') as f:
train_lbl = f.read()
# reads them into memory
train_img = ~np.array(list(train_img[16:])).reshape(60000, 28, 28).astype(np.uint8)
train_lbl = np.array(list(train_lbl[ 8:])).astype(np.uint8)
inputs = train_img.reshape(60000, 784)/255
# For encoding categorical variables.
encoder = pre.LabelBinarizer()
encoder.fit(train_lbl)
outputs = encoder.transform(train_lbl)
print(train_lbl[0], outputs[0])
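# e.g. LabelBinarizer turns a label such as 5 into the one-hot row
# [0 0 0 0 0 1 0 0 0 0]; the print above shows that pairing for the first
# training example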
if os.path.isfile('data/model.h5'):
model = kr.models.load_model('data/model.h5')
# if model already exist uses it
else:
model.fit(inputs, outputs, epochs=15, batch_size=100)
model.save("data/model.h5")
#makes model and saves it
with gzip.open('data/t10k-images-idx3-ubyte.gz', 'rb') as f:
test_img = f.read()
with gzip.open('data/t10k-labels-idx1-ubyte.gz', 'rb') as f:
test_lbl = f.read()
test_img = ~np.array(list(test_img[16:])).reshape(10000, 784).astype(np.uint8)
test_lbl = np.array(list(test_lbl[ 8:])).astype(np.uint8)
outcome = (encoder.inverse_transform(model.predict(test_img)) == test_lbl).sum()
print("\nModel is", outcome/100,"% Accurate\n")
print("\nModel has been created or loaded into memory")
def testCases():
amm = int(input("How many tests would you like to run "))
from random import randint
for i in range(amm):
print("Test Number : ", i+1,"\n")
x = randint(0, 9999)
print("The random index: ", x, "\n")
print("The result array: ")
test = model.predict(test_img[x:x+1])
# Print the result array
print(test, "\n")
# Get the maximum value from the machine predictions
pred_result = test.argmax(axis=1)
print("The program predicted : =>> ", pred_result)
print("The number is : =>> ", test_lbl[x:x+1])
print("===================")
def loadImage():
root = tk.Tk()
root.withdraw()
#https://stackoverflow.com/questions/9319317/quick-and-easy-file-dialog-in-python
file_path = filedialog.askopenfilename()# opens file select window
img = image.load_img(path=file_path,color_mode = "grayscale",target_size=(28,28,1))
image1 = np.array(list(image.img_to_array(img))).reshape(1, 784).astype(np.uint8) / 255.0
# shapes array
plt.imshow(img)
plt.show()
# plots and displays image
test = model.predict(image1)
# runs test of image on model
print("program has predicted : ", test.argmax(axis=1))
#https://towardsdatascience.com/basics-of-image-classification-with-keras-43779a299c8b
print("Load an image on your system")
opt=True
while opt:
print("============================")
print(""" 1 Load an image
2 Run test
3 Exit """)
opt= input("Choose an option ")
print("============================")
#https://stackoverflow.com/questions/19964603/creating-a-menu-in-python
if opt == "1":
loadImage()
elif opt == "2":
testCases()
elif opt == "3":
exit()
else:
print("Invalid Entry Try Again")
``` |
{
"source": "JohnMansell/CarND-Behavioral-Cloning-P3",
"score": 3
} |
#### File: JohnMansell/CarND-Behavioral-Cloning-P3/helperFunctions.py
```python
import csv
import cv2
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D
from keras.layers.convolutional import Convolution2D
import matplotlib.pyplot as plt
from keras.callbacks import ModelCheckpoint
import random
from tempfile import TemporaryFile
correction = 0.25
num_bins = 23
colorConversion = cv2.COLOR_BGR2LAB
'''---------------------------------------
Read data from File
---------------------------------------'''
def read_data_from_file(fileName, lineArray):
with open(fileName) as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lineArray.append(line)
'''---------------------------------------
Extract images and Measurements
---------------------------------------'''
def get_images_and_measurements(lineArray, splitToken, imagePath, imageArray, measurementArray):
for line in lineArray:
for i in range(3):
source_path = line[i]
tokens = source_path.split(splitToken)
filename = tokens[-1]
local_path = imagePath + filename
image = cv2.imread(local_path)
imageArray.append(image)
measurement = float(line[3])
measurementArray.append(measurement)
measurementArray.append(measurement + correction)
measurementArray.append(measurement - correction)
'''---------------------------------------
Print Histogram of Data
---------------------------------------'''
def print_histogram(measurement_array, show, title = ''):
avg_samples_per_bin = len(measurement_array)/num_bins
hist, bins = np.histogram(measurement_array, num_bins)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.plot((np.min(measurement_array), np.max(measurement_array)), (avg_samples_per_bin, avg_samples_per_bin), 'k-')
if show:
plt.title(title)
plt.show()
'''---------------------------------------
Flip each image and measurement
---------------------------------------'''
def flip_image_and_measurement(imageArray, measurementArray, augmented_images, augmented_measurements):
for image, measurement in zip(imageArray, measurementArray):
augmented_images.append(image)
augmented_measurements.append(measurement)
flipped_image = cv2.flip(image, 1)
flipped_measurement = measurement * -1.0
augmented_images.append(flipped_image)
augmented_measurements.append(flipped_measurement)
'''---------------------------------------
Get Transform
---------------------------------------'''
def get_transform(img,
x_bottom = 1136,
x_top = 267,
depth = 83,
hood_depth = 33,
dst_offset = 271,
cal1_offset = 27,
cal2_offset = 30):
img_size = (img.shape[1], img.shape[0])
# src = (x1, y1) , (x2, y2), (x3, y3), (x4, y4)
x1 = int((img_size[0] - x_top) / 2)
x2 = int((img_size[0] + x_top) / 2)
y1 = y2 = int((img_size[1] - depth))
x3 = int((img_size[0] - x_bottom) / 2)
x4 = int((img_size[0] + x_bottom) / 2)
y3 = y4 = (img_size[1] - hood_depth)
# dst = (j1, k1), (j2, k2), (j3, k3), (j4, k4)
j1 = j3 = (img_size[0] / 2) - dst_offset
j2 = j4 = (img_size[0] / 2) + dst_offset
k1 = k2 = 0
k3 = k4 = img_size[1]
src = np.float32([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
dst = np.float32([[j1, k1], [j2, k2], [j3, k3], [j4, k4]])
# Perspective Transform -- Matrix
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
return (M, Minv)
'''---------------------------------------
Warp
---------------------------------------'''
img = cv2.imread('myData/IMG/center_2018_02_26_12_32_17_315.jpg')
M, inv = get_transform(img)
def warp_image(img, mtx):
img_size = (img.shape[1], img.shape[0])
# Perspective Transform
warped = cv2.warpPerspective(img, mtx, img_size, flags=cv2.INTER_LINEAR)
return warped
'''----------------------------
Mag Threshold
-------------------------------'''
smin = 3
smax = 255
bmin = 0
bmax = 209
dmin = 0.1
dmax = 0.9
m_min = 5
m_max = 311
d_kernal = 13
m_kernal = 5
picture = 5
sigma_color = 75
sigma_space = 75
def mag_threshold(image, sobel_kernel=m_kernal, mag_thresh = (m_min, m_max)):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize = sobel_kernel)
gradmag = np.sqrt(sobelx**2 + sobely**2)
scale_factor = np.max(gradmag) / 255
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= mag_thresh[0])& (gradmag <= mag_thresh[1])] = 1
return binary_output
'''----------------------------
Color
-------------------------------'''
def color(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
'''----------------------------
Create Binary
-------------------------------'''
def create_binary(img):
warp = warp_image(img, M)
bilateral = cv2.bilateralFilter(warp, m_kernal, sigma_color, sigma_space)
mag = mag_threshold(bilateral, m_kernal, (m_min, m_max))
result = np.copy(warp)
result[(mag == 1)] = 0
return result
'''---------------------------------------
Balance DataSet
---------------------------------------'''
def balance_data_set(augmented_images, augmented_measurements, hist, bins, averageHeight, newImages, newMeasurements, lowerLimit, upperLimit):
for image, measurement in zip(augmented_images, augmented_measurements):
if (measurement < lowerLimit or measurement > upperLimit):
for i in range(num_bins):
if bins[i] < measurement < bins[i + 1]:
difference = abs(averageHeight - hist[i])
multiples = int(difference / hist[i])
for k in range(multiples):
brightness = random.randint(0, 100)
yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
y, u, v = cv2.split(yuv)
y -= brightness
final_yuv = cv2.merge((y, u, v))
newImage = cv2.cvtColor(final_yuv, cv2.COLOR_YUV2BGR)
newImages.append(newImage)
newMeasurements.append(measurement)
'''---------------------------------------
PreProcess Image
---------------------------------------'''
def preprocess_image(img):
img = create_binary(img)
return img
``` |
{
"source": "johnmarcampbell/twircBot",
"score": 4
} |
#### File: twircBot/twircbot/configreader.py
```python
import re
class ConfigReader(object):
"""
A class that reads a configuration file and generates a dictionary
of configuration parameters
"""
def __init__(self):
"""Initialize some variables"""
self.configs = {}
def parse_file(self, config_file_name, is_default = False):
"""
This function opens a file containing one or many configurations
and sends the text to parse_text_block()
"""
with open(config_file_name, 'r') as f:
text_block = f.read()
file_configs = self.parse_text_block(text_block, is_default)
for config in file_configs:
if config in self.configs.keys():
self.configs[config].update(file_configs[config])
else:
self.configs[config] = file_configs[config]
self.check_types()
return self.configs['default']
def parse_text_block(self, block, is_default):
"""
This function takes some block of text, and divides it into sections,
each of which correspond to one configuration spec. It then calls a
function to parse each section.
"""
configs = {}
config_name = "default"
configs[config_name] = ''
for line in block.splitlines():
if len(line) == 0:
continue
if line[0] == '%':
config_name = line[1:]
configs[config_name] = ''
else:
configs[config_name] += line + '\r\n'
for key in configs:
configs[key] = self.parse_config_text(configs[key])
return configs
# if is_default:
# self.check_default_config()
def parse_config_text(self, text):
"""
This function takes a block of text that corresponds to only *one*
config and turns it into a dictionary
"""
config = {}
for line in text.splitlines():
self.parse_line(line, config)
return config
def parse_line(self, line, config):
"""Function to parse individual lines of a config"""
words = self.split_line(line)
# Remove semi-colons, etc., used to separate key/values
key_string = '([<KEY>]+)'
key = re.search(key_string, words[0]).group(1)
values = words[1:]
if len(values) == 1:
values = values[0]
config[key] = values
def split_line(self, line):
"""
This function takes a string and splits it into words, but
keeps strings enclosed in single quotes together
"""
# Split line into words around spaces, unless the spaces are in quotations marks
words = []
inside_quotes = False
temp_word = ''
for i in line:
if i == "'" and not inside_quotes:
inside_quotes = True
elif i == "'" and inside_quotes:
inside_quotes = False
words.append(temp_word)
temp_word = ''
elif i == ' ' and not inside_quotes and temp_word != '':
words.append(temp_word)
temp_word = ''
elif i == ' ' and inside_quotes:
temp_word += i
elif i != ' ':
temp_word += i
if temp_word != '':
words.append(temp_word)
return words
def check_types(self):
"""Function to check the types of all parameters"""
for config in self.configs:
if config != 'template':
for parameter, init_value in self.configs[config].items():
# If value should be a list, convert it
if self.configs['template'][parameter] == 'list' and (type(init_value).__name__ != 'list'):
final_value = [init_value]
elif self.configs['template'][parameter] == 'str':
final_value = str(init_value)
elif self.configs['template'][parameter] == 'int':
final_value = int(init_value)
elif self.configs['template'][parameter] == 'float':
final_value = float(init_value)
elif self.configs['template'][parameter] == 'dict':
final_value = dict(init_value)
else:
final_value = init_value
self.configs[config][parameter] = final_value
```
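The key-matching regex in `parse_line()` is redacted to `<KEY>` in this dump, so an end-to-end parse cannot be shown reliably; the quote-aware tokenizer, however, is self-contained. A minimal usage sketch (the import path assumes the package layout shown in the file header):

```python
from twircbot.configreader import ConfigReader

reader = ConfigReader()
# split_line() splits on spaces but keeps single-quoted phrases together
print(reader.split_line("greeting 'hello there' 42"))
# -> ['greeting', 'hello there', '42']
```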
#### File: twircBot/twircbot/connectivitymonitor.py
```python
from datetime import datetime as dt
import re
from .botmodule import BotModule
from .twitchtools import parse_wrapper
class ConnectivityMonitor(BotModule):
"""Module for monitor health of connection to chat"""
def __init__(self, name):
"""Init function for ConnectivityMonitor"""
BotModule.__init__(self, name)
self.config = self.config_manager.parse_file('twircbot/config/defaultConnectivityMonitor.config')
self.uptime_string = '\\' + self.config['invoke_string'] + self.config['uptime_suffix']
self.bornTime = dt.utcnow()
self.last_data = dt.utcnow()
@parse_wrapper
def parse(self, data):
"""Parse chat data and log it"""
self.last_data = dt.utcnow()
if (data.type == 'privmsg') or (data.type == 'whisper'):
uptime_match = re.search(self.uptime_string, data.content)
if uptime_match:
uptime_message = 'Uptime: ' + self.pretty_timedelta(self.lifetime)
self.reply(data, uptime_message)
def check_timers(self):
"""Function to check timers"""
now = dt.utcnow()
inputDelta = now - self.last_data
self.lifetime = now - self.bornTime
if inputDelta.seconds > self.config['reconnect_timer']:
self.host.reconnect = True
self.last_data = now
else:
self.host.reconnect = False
if self.lifetime.seconds > self.config['stayalive_timer'] and self.config['stayalive_timer'] > 0:
self.host.stayAlive = False
def pretty_timedelta(self, t_delta):
"""Turn a timedelta object into a nicely formatted string"""
(hours, hours_remainder) = (int(t_delta.seconds / 3600), t_delta.seconds % 3600)
(minutes, seconds) = (int(hours_remainder / 60), hours_remainder % 60)
# Round to the nearest second
        if t_delta.microseconds >= 500000:  # 0.5 s expressed in microseconds
seconds += 1
pretty_string = ''
if t_delta.days > 0:
pretty_string += '{0!s} days, '.format(t_delta.days)
        # Make sure values are correctly padded
if hours >= 10:
hours = str(hours)
else:
hours = '0' + str(hours)
if minutes >= 10:
minutes = str(minutes)
else:
minutes = '0' + str(minutes)
if seconds >= 10:
seconds = str(seconds)
else:
seconds = '0' + str(seconds)
pretty_string += '{0}:{1}:{2} [hh:mm:ss]'.format(hours, minutes, seconds)
return pretty_string
``` |
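For reference, a standalone mirror of the timedelta formatting used by `pretty_timedelta()` above (illustration only, not part of the module), showing the expected output shape:

```python
from datetime import timedelta

def pretty_timedelta(t_delta):
    """Standalone mirror of ConnectivityMonitor.pretty_timedelta, for illustration."""
    hours, remainder = divmod(t_delta.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    if t_delta.microseconds >= 500000:   # round to the nearest second
        seconds += 1
    prefix = '{0!s} days, '.format(t_delta.days) if t_delta.days > 0 else ''
    return prefix + '{0:02d}:{1:02d}:{2:02d} [hh:mm:ss]'.format(hours, minutes, seconds)

print(pretty_timedelta(timedelta(days=1, hours=3, minutes=7, seconds=9, microseconds=600000)))
# -> 1 days, 03:07:10 [hh:mm:ss]
```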
{
"source": "john-marinelli/Electric-Car-Racing",
"score": 3
} |
#### File: john-marinelli/Electric-Car-Racing/simulation.py
```python
import sys
import time
import logging
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import pyqtgraph as pg
import cProfile
from datastore import (DataStore, RacingSimulationResults)
from logging_config import configure_logging
from physics_equations import (max_negative_power_physics_simulation,
max_positive_power_physics_simulation,
constrained_velocity_physics_simulation,
)
from electric_car_properties import ElectricCarProperties
from track_properties import (TrackProperties,
high_plains_raceway)
logger = logging.getLogger(__name__)
class MainWindow(QWidget):
# define the SIGNALs that MainWindow will send to other threads
mainWindowStartCalculatingSignal = pyqtSignal(int)
def __init__(self, *args, **kwargs):
QWidget.__init__(self, parent=None)
self.data_store = DataStore()
logger.info("MainWindow: DataStore initialized",
extra={'sim_index': self.data_store.get_simulation_index()})
# Create GUI related resources
self.setWindowTitle('Race Simulation')
        # create the user display controls and data results graphs to run the simulation
self.createUserDisplayControls()
        # create placeholders for the plots MainWindow will be delivering (updating)
        # data into.
self.graphs = pg.GraphicsLayoutWidget(show=True, title="Race Sim plots")
self.graphs.resize(1000, 540)
self.p1 = self.graphs.addPlot(name="Plot1", title="Time (s)")
self.p2 = self.graphs.addPlot(name="Plot2", title="Distance (m)")
self.p2.hide()
self.p3 = self.graphs.addPlot(name="Plot3", title="Velocity (m/s)")
self.p3.hide()
self.p4 = self.graphs.addPlot(name="Plot4", title="Acceleration (m/s^2)")
self.p4.hide()
self.p5 = self.graphs.addPlot(name="Plot5", title="Motor Power")
self.p5.hide()
self.p6 = self.graphs.addPlot(name="Plot6", title="Battery Power")
self.p6.hide()
self.p7 = self.graphs.addPlot(name="Plot7", title="Battery Energy (joules)")
self.p7.hide()
        # Link the user's X-coordinate movements of all plots together. Practically, there has
        # to be one plot they all link to, and in this case it's self.p1 (Time).
self.p2.setXLink(self.p1)
self.p3.setXLink(self.p1)
self.p4.setXLink(self.p1)
self.p5.setXLink(self.p1)
self.p6.setXLink(self.p1)
self.p7.setXLink(self.p1)
# Layout the major GUI components
#self.layout = QtGui.QVBoxLayout()
self.layout = QHBoxLayout()
self.layout.addWidget(self.userDisplayControlsGroup)
self.layout.addWidget(self.graphs)
self.setLayout(self.layout)
# Create the instances of our worker threads
self.simulationThread = SimulationThread(self.data_store)
self.plotRefreshTimingThread = PlotRefreshTimingThread()
# Setup the SIGNALs to be received from the worker threads
self.simulationThread.simulationThreadSignal.connect(self.signalRcvFromSimulationThread)
self.plotRefreshTimingThread.plotRefreshTimingSignal.connect(self.signalPlotRefresh)
        # TODO - decide what to do when SimulationThread finishes or dies, e.g.
        # refresh the GUI and save/close the results file
#self.simulationThread.finished.connect(self.simulationThreadFinished)
#self.simulationThread.terminated.connect(self.simulationThreadTerminated)
# Now that the SimulationThread has been created (but not yet running), connect the
# Button clicked in MainWindow - call a SimulationThread method to do something
self.buttonRun.clicked.connect(self.createStartCalculatingSignal)
self.buttonStop.clicked.connect(self.simulationThread.thread_stop_calculating)
self.checkboxDistanceBreakpoint.clicked.connect(self.enableBreakpointSpinbox)
self.simulationThread.start()
self.plotRefreshTimingThread.start()
def enableBreakpointSpinbox(self):
if self.checkboxDistanceBreakpoint.isChecked() == True:
self.spinboxDistanceBreakpoint.setEnabled(True)
self.spinboxDistanceBreakpoint.setReadOnly(False)
else:
self.spinboxDistanceBreakpoint.setEnabled(False)
self.spinboxDistanceBreakpoint.setReadOnly(True)
def createStartCalculatingSignal(self):
"""
Send a SIGNAL to the simulation thread to start the simulation calculations.
Based on the user's control settings in the GUI, figure out what "distance" value
        to send with the signal to the SimulationThread to start/continue the simulation.
        The "distance" value sent to the SimulationThread is overloaded with these meanings:
>0 distance in meters from the start on the track...
=0 singlestep,
<0 whole track,
"""
if self.checkboxDistanceBreakpoint.isChecked() == True:
distance = self.spinboxDistanceBreakpoint.value()
else:
# No breakpoint indicated on GUI so run the whole track or
# until user hits "pause" button
distance = -1
# signal the thread
self.simulationThread.thread_start_calculating(distance)
def createUserDisplayControls(self):
self.labelDisplayControl = QLabel("Display Control")
# Note - FYI - created in the order the controls appear on screen
self.labelStatus = QLabel("Status")
self.textboxStatus = QLineEdit("Initialized", self)
self.textboxStatus.setReadOnly(True)
self.buttonRun = QPushButton('Run/Continue', self)
self.buttonRun.setEnabled(True)
self.buttonStop = QPushButton('Pause', self)
self.buttonStop.setEnabled(True)
self.checkboxDistanceBreakpoint = QCheckBox('Distance Breakpoint (m)', self)
self.checkboxDistanceBreakpoint.setChecked(False)
self.spinboxDistanceBreakpoint = QDoubleSpinBox()
self.spinboxDistanceBreakpoint.setReadOnly(True)
self.spinboxDistanceBreakpoint.setRange(0,999999)
#outputs of simulation
self.labelSimulationIndex = QLabel("Current Sim. Index")
self.textboxSimulationIndex = QLineEdit("0",self)
self.textboxSimulationIndex.setReadOnly(False)
self.checkboxTime = QCheckBox('Time (s)', self)
self.checkboxTime.setChecked(False)
self.spinboxTime = QDoubleSpinBox()
self.spinboxTime.setReadOnly(True)
self.spinboxTime.setRange(0, 999999)
self.checkboxDistance = QCheckBox('Distance (m)', self)
self.checkboxDistance.setChecked(False)
self.spinboxDistance = QDoubleSpinBox()
self.spinboxDistance.setReadOnly(True)
self.spinboxDistance.setRange(0,999999)
self.checkboxVelocity = QCheckBox('Velocity (m/s)', self)
self.checkboxVelocity.setChecked(False)
self.spinboxVelocity = QDoubleSpinBox()
self.spinboxVelocity.setReadOnly(True)
self.spinboxVelocity.setRange(0,999999)
self.checkboxAcceleration = QCheckBox('Acceleration (m/s^2)', self)
self.checkboxAcceleration.setChecked(False)
self.spinboxAcceleration = QDoubleSpinBox()
self.spinboxAcceleration.setReadOnly(True)
self.checkboxMotorPower = QCheckBox('Motor Power', self)
self.checkboxMotorPower.setChecked(False)
self.spinboxMotorPower = QDoubleSpinBox()
self.spinboxMotorPower.setReadOnly(True)
self.spinboxMotorPower.setRange(0,999999)
self.checkboxBatteryPower = QCheckBox('Battery Power', self)
self.checkboxBatteryPower.setChecked(False)
self.spinboxBatteryPower = QDoubleSpinBox()
self.spinboxBatteryPower.setReadOnly(True)
self.spinboxBatteryPower.setRange(0,999999)
self.checkboxBatteryEnergy = QCheckBox('Battery Energy (j)', self)
self.checkboxBatteryEnergy.setChecked(False)
self.spinboxBatteryEnergy = QDoubleSpinBox()
self.spinboxBatteryEnergy.setReadOnly(True)
self.spinboxBatteryEnergy.setRange(0,999999)
#self.userDisplayControlsGroup = QtGui.QGroupBox('User Display Controls')
self.userDisplayControlsGroup = QGroupBox('User Display Controls')
#self.userDisplayControlsLayout= QtGui.QGridLayout()
self.userDisplayControlsLayout= QGridLayout()
self.userDisplayControlsLayout.addWidget(self.labelStatus, 0, 0)
self.userDisplayControlsLayout.addWidget(self.textboxStatus, 0, 1)
self.userDisplayControlsLayout.addWidget(self.buttonRun, 1, 0)
self.userDisplayControlsLayout.addWidget(self.buttonStop, 1, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxDistanceBreakpoint, 2, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxDistanceBreakpoint, 2, 1)
self.userDisplayControlsLayout.addWidget(self.labelSimulationIndex, 3, 0)
self.userDisplayControlsLayout.addWidget(self.textboxSimulationIndex, 3, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxTime, 4, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxTime, 4, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxDistance, 5, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxDistance, 5, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxVelocity, 6, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxVelocity, 6, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxAcceleration, 7, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxAcceleration, 7, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxMotorPower, 8, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxMotorPower, 8, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxBatteryPower, 9, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxBatteryPower, 9, 1)
self.userDisplayControlsLayout.addWidget(self.checkboxBatteryEnergy, 10, 0)
self.userDisplayControlsLayout.addWidget(self.spinboxBatteryEnergy, 10, 1)
self.userDisplayControlsGroup.setLayout(self.userDisplayControlsLayout)
def simulationThreadResultsDataDisplay(self):
        # TODO placeholder for real work to be done when the SimulationThread (a worker thread)
        # SIGNALs MainWindow that new data is available in shared memory
print('Window SIGNAL from SimulationThread: Results_data_ready')
def simulationThreadFinished(self):
        # TODO placeholder for when SimulationThread SIGNALs it is exiting and its
        # data is available in shared memory
print('Window: SIGNAL From SimulationThread: Finished')
def simulationThreadTerminated(self):
# TODO placeholder for SimulationThread SIGNALs terminated
print('Window: SIGNAL From SimulationThread: Terminated')
"""
Slots routines to handle SIGNALs sent to MainWindow from other threads
"""
@pyqtSlot(str)
def signalRcvFromSimulationThread(self, text):
#self.buttonRun.setText(text)
self.textboxStatus.setText(text)
@pyqtSlot()
def signalPlotRefresh(self):
        # Update the window to display computation status, data, and the plots selected by the user
# This is called periodically because of the signal emitted from PlotRefreshTimingThread
current_sim_index = (self.data_store.get_simulation_index())
logger.info("MainWindow:", extra={'sim_index': current_sim_index})
self.textboxSimulationIndex.setText("{}".format(current_sim_index))
"""
        Only refresh data if the simulation calculations have begun, indicated by
        current_sim_index > 0.
        Note: current_sim_index is decremented by 1 for the following calls
        because the lap_velocity_simulation calculations may be incomplete for that index
        when this "plot" signal was received and interrupted them. That is, the
        SimulationThread could still be updating the DataStore lists at
        simulation_index and not all lists have been calculated yet, so we should
        just plot up to the last complete record.
"""
if current_sim_index > 0 :
# Get the current data values and update the corresponding display field textbox
time = self.data_store.get_time_at_index(current_sim_index-1)
self.spinboxTime.setValue(time)
distance = self.data_store.get_distance_at_index(current_sim_index-1)
self.spinboxDistance.setValue(distance)
velocity = self.data_store.get_velocity_at_index(current_sim_index-1)
self.spinboxVelocity.setValue(velocity)
acceleration = self.data_store.get_acceleration_at_index(current_sim_index-1)
self.spinboxAcceleration.setValue(acceleration)
motor_power = self.data_store.get_motor_power_at_index(current_sim_index-1)
self.spinboxMotorPower.setValue(motor_power)
battery_power = self.data_store.get_battery_power_at_index(current_sim_index-1)
self.spinboxBatteryPower.setValue(battery_power)
# TBD not yet implemented in physics_equations
#battery_energy = self.data_store.get_battery_energy_at_index(current_sim_index-1)
#self.spinboxBatteryEnergy.setValue(battery_energy)
# Display the data values
# create a new plot for every point simulated so far
x = [z for z in range(current_sim_index)]
_time = []
_distance = []
_velocity = []
_max_velocity = []
_acceleration = []
_motor_power = []
_battery_power = []
_battery_energy = []
_time = self.data_store.get_time_list(current_sim_index)
_distance = self.data_store.get_distance_list(current_sim_index)
_velocity = self.data_store.get_velocity_list(current_sim_index)
_max_velocity = self.data_store.get_track_max_velocity_list(current_sim_index)
_acceleration = self.data_store.get_acceleration_list(current_sim_index)
_motor_power = self.data_store.get_motor_power_list(current_sim_index)
_battery_power = self.data_store.get_battery_power_list(current_sim_index)
#TODO not yet implemented
#_battery_energy = self.data_store.get_battery_energy_list(current_sim_index)
self.p1.plot(x=x, y=_time, name="Plot1", title="Time")
# selectively display the plots based on the checkboxes
if self.checkboxDistance.isChecked() == True :
self.p2.show()
self.p2.plot(x=x, y=_distance, name="Plot2", title="Distance (m)")
else:
self.p2.hide()
if self.checkboxVelocity.isChecked() == True :
self.p3.show()
self.p3.plot(x=x, y=_max_velocity, name="Plot3", title="Max Velocity (m/sec)", pen='r')
self.p3.plot(x=x, y=_velocity, name="Plot3", title="Velocity (m/sec)")
else:
self.p3.hide()
if self.checkboxAcceleration.isChecked() == True :
self.p4.show()
self.p4.plot(x=x, y=_acceleration, name="Plot4", title="Acceleration (m/sec^2)")
else:
self.p4.hide()
if self.checkboxMotorPower.isChecked() == True :
self.p5.show()
self.p5.plot(x=x, y=_motor_power, name="Plot5", title="Motor Power")
else:
self.p5.hide()
if self.checkboxBatteryPower.isChecked() == True :
self.p6.show()
self.p6.plot(x=x, y=_battery_power, name="Plot6", title="Battery Power")
else:
self.p6.hide()
"""TBD - to be added once Battery Energy is working in physics_equations
if self.checkboxBatteryEnergy.isChecked() == True :
self.p7.show()
self.p7.plot(x=x, y=_battery_energy, name="Plot7", title="Battery Energy (joules)")
else:
self.p7.hide()
"""
class SimulationThread(QThread):
# Define the Signals we'll be emitting to the MainWindow
simulationThreadSignal = pyqtSignal(str)
simulationThreadPlotSignal = pyqtSignal(int)
breakpointDistance = 0
def __init__(self, passed_data_store, parent=None):
QThread.__init__(self, parent)
self.exiting = False
        self.setObjectName("SimulationThread")
        """ simulationComputing is used for starting/stopping the loop control logic,
        which is controlled (signalled) from the MainWindow.
        Start without computation running in the simulationThread.
        """
self.simulationComputing = False
self.breakpointDistance = 0
# Initialize the simulation universe
self._data_store = passed_data_store
self.initialize_race()
#print('SimulationThread: __init()__')
#print("SimulationThread: Simulation Index = {}".format(self._data_store.get_simulation_index()))
#connect some signals from the main window to us
#self.connect(self, QtCore.SIGNAL('To_End',self.processToEnd)
def __del__(self):
# Before a SimulationThread object is destroyed, we need to ensure that it stops processing.
# For this reason, we implement the following method in a way that indicates to
# the part of the object that performs the processing that it must stop, and waits
# until it does so.
self.exiting = True
self.wait()
# rotational inertia estimation: http://www.hpwizard.com/rotational-inertia.html
def initialize_race(self):
segment_distance = 0.005 # meters, this must be very very small
battery_power = 40000 # 40kW
motor_efficiency = 0.8
wheel_radius = 0.25 # m, ~20 in OD on tires
rotational_inertia = 10 # kg*m^2
mass = 1000 # kg
drag_coefficient = 0.4
frontal_area = 7 # m^2
air_density = 1 # kg/m^3
wheel_pressure_bar = 3 # bar
track = TrackProperties()
track.set_air_density(air_density)
for distance in high_plains_raceway:
track.add_critical_point(distance, high_plains_raceway[distance], track.FREE_ACCELERATION)
track.generate_track_list(segment_distance)
car = ElectricCarProperties()
car.set_car_parameters(mass=mass, rotational_inertia=rotational_inertia,
motor_power=battery_power, motor_efficiency=motor_efficiency,
battery_capacity=10, drag_coefficient=drag_coefficient,
frontal_area=frontal_area, wheel_radius=wheel_radius,
wheel_pressure_bar=wheel_pressure_bar)
self._data_store.initialize_lap_lists(len(track.distance_list))
self._data_store.set_car_properties(car)
self._data_store.set_track_properties(track)
""" SimulationThread signal handling routines. This is the collection of SLOTS
that get signaled (emitted) from the MainWindow and tell the SimulationThread
what to do, like change states and start calculating, pause, etc.
"""
@pyqtSlot()
def thread_start_calculating(self, distance_value):
"""
This signal (slot) handler takes the distance value
and updates SimulationThread computing state and interprets
the distance_value into appropriate values for "breakpoints" to,
if necessary, to stop computing.
"""
print("Breakpoint Distance value:{}".format(distance_value))
logger.info('Slot:thread_start_calculating :',
extra={'sim_index': self._data_store.get_simulation_index()})
if distance_value == 0:
logger.info('Slot:thread_start_calculating SINGLE STEP NOT IMPLEMENTED:',
extra={'sim_index': self._data_store.get_simulation_index()})
#TODO - finish this breakpoint case
self.simulationComputing = False
elif distance_value == -1:
logger.info('Slot:thread_start_calculating RUN TO COMPLETION :',
extra={'sim_index': self._data_store.get_simulation_index()})
# set the breakpoint to be a very large number to indicate run to completion
self.breakpointDistance = 9999999
self.simulationComputing = True
else:
# run to the distance value point in the track
sim_index = self._data_store.get_simulation_index()
if distance_value > self._data_store.get_distance_at_index(sim_index) :
logger.info('Slot:thread_start_calculating RUN TO DISTANCE :',
extra={'sim_index': sim_index})
# requested breakpoint is further down the track
self.breakpointDistance = distance_value
# Start computing and acknowledge to MainWindow by sending a signal back
self.simulationThreadSignal.emit("Calculating...")
# "state" variable indicating thread should be calculating
self.simulationComputing = True
else:
logger.info('Slot:thread_start_calculating PAST REQUESTED DISTANCE :',
extra={'sim_index': sim_index})
# simulation has already past this point in the track, don't proceed
self.simulationComputing = False
@pyqtSlot()
def thread_stop_calculating(self):
logger.info('Slot:thread_stop_calculating :',
extra={'sim_index': self._data_store.get_simulation_index()})
# Now send a signal back to the main window
self.simulationThreadSignal.emit("Paused")
# "state" variable indicating thread should stop calculating
self.simulationComputing = False
def racing_simulation(self):
"""Function accepts a car and a track and executes
        a simulation to output critical metrics related
to battery life and track speed.
Args:
Nothing, all required vars are defined in class
Returns:
Nothing, all required vars are defined in class
"""
results = RacingSimulationResults()
self.lap_velocity_simulation()
# only calculate results if the simulation ran through without an interruption
if not self._data_store.exit_event.is_set():
lap_results = self._data_store.get_lap_results()
# TODO fix this
#results.laps_per_pit_stop = car["battery_capacity"] / lap_results.motor_energy_list[-1]
results.lap_time = lap_results.end_velocity
results.lap_results = lap_results
self._data_store.set_race_results(results)
def lap_velocity_simulation(self):
"""Function calculates the velocity profile of a car with
car_properties on a track with track_properties. The car
        starts with an initial velocity of initial_velocity.
Args:
data_store (DataStore): Thread safe storage for all simulation data
Returns:
Nothing (all data saved in the datastore)
"""
# performance increases by assigning local functions
# https://towardsdatascience.com/10-techniques-to-speed-up-python-runtime-95e213e925dc
add_physics_result_to_datastore = self._data_store.add_physics_results_to_lap_results
get_velocity = self._data_store.get_velocity_at_index
track = self._data_store.get_track_properties()
air_density = track.get_air_density()
car = self._data_store.get_car_properties()
        # the time profile needs to be the same length as the distance list
        # to complete a lap of the simulation
list_len = len(track.distance_list)
logger.debug('track.distance_list length={}'.format(list_len),
extra={'sim_index': self._data_store.get_simulation_index()})
        # TODO - Add self.simulationComputing to the while-loop control below
while self._data_store.get_simulation_index() < list_len:
# get the new index we are going to calculate
sim_index = self._data_store.get_simulation_index()
if self._data_store.exit_event.is_set():
break
distance_of_travel = (track.distance_list[sim_index] -
track.distance_list[sim_index - 1])
# only continue simulation computing if the GUI says to do so.
if (self.simulationComputing == True and self.breakpointDistance > track.distance_list[sim_index]):
velocity = get_velocity(sim_index - 1)
physics_results = max_positive_power_physics_simulation(velocity,
distance_of_travel,
car,
air_density)
add_physics_result_to_datastore(physics_results, sim_index)
# check if velocity constraints are violated
if get_velocity(sim_index) > track.max_velocity_list[sim_index]:
# velocity constraint violated!!
# start walking back until velocity constraint at sim_index is met
logger.info("velocity constraint violated starting walk back, current v: {}, max: {}"
.format(physics_results.final_velocity, track.max_velocity_list[sim_index]),
extra={'sim_index': self._data_store.get_simulation_index()})
max_velocity_constraint = track.max_velocity_list[sim_index]
while get_velocity(sim_index) > max_velocity_constraint:
"""This while loop's purpose is to recalculate a portion of the
                        car's velocity profile because the car ended up going too fast at a point on the
track. To recalculate the following happens:
1. a "walk back" index is used to track how far back the recalculation occurs
2. from the index (sim_index - walk_back_index) to (sim_index - 1) the results
are calculated as a maximum regeneration effort by the motor
3. at the sim_index the results are calculated as a constrained velocity
- if the results of the calculation are realistic then the walk back is done
- if the results are not realistic then increment the
walk back counter and recalculate
"""
walk_back_counter = self._data_store.get_walk_back_counter()
recalculation_start_index = sim_index - walk_back_counter
logger.debug("starting and ending walkback index: {}, {}"
.format(recalculation_start_index, sim_index),
extra={'sim_index': self._data_store.get_simulation_index()})
for i in range(recalculation_start_index, sim_index):
velocity = get_velocity(i - 1)
logger.debug("velocity: {}"
.format(velocity),
extra={'sim_index': i})
# recalculate with negative motor power
physics_results = max_negative_power_physics_simulation(velocity,
distance_of_travel,
car,
air_density)
logger.debug("next velocity: {}"
.format(physics_results.final_velocity),
extra={'sim_index': i})
add_physics_result_to_datastore(physics_results, i)
velocity = get_velocity(sim_index - 1)
# last deceleration will be a constrained velocity because
# it will be neither max positive or negative motor power
physics_results = \
constrained_velocity_physics_simulation(velocity,
max_velocity_constraint,
distance_of_travel,
car,
air_density)
logger.debug("velocity start, end, max: {} {} {}"
.format(velocity,
physics_results.final_velocity,
max_velocity_constraint),
extra={'sim_index': sim_index})
# check if constrained velocity calculation is realistic
# TODO other checks here can be on acceleration or wheel force
if physics_results.motor_power < -car["motor_power"]:
logger.debug(
"velocity constraint still violated, calculated power: {}, max power: {}"
.format(physics_results.motor_power, car["motor_power"]),
extra={'sim_index': sim_index})
logger.debug("sim_index, walkback: {} {}, incrementing walk back"
.format(sim_index, walk_back_counter),
extra={'sim_index': sim_index})
self._data_store.increment_walk_back_counter()
else:
logger.info(
"velocity constraint accepted, calculated power: {}, max power: {}"
.format(physics_results.motor_power, car["motor_power"]),
extra={'sim_index': sim_index})
logger.info("constrained velocity equation accepted",
extra={'sim_index': sim_index})
add_physics_result_to_datastore(physics_results, sim_index)
                    # end of while get_velocity(sim_index) > max_velocity_constraint:
# walk back complete, reset walk back index for next time
self._data_store.reset_walk_back_counter()
# completed calculation for the latest simulation index,
self._data_store.increment_simulation_index()
else:
# self.simulationComputing is False or we've reached a breakpoint,
# so wait for GUI user to indicate proceed
if self.simulationComputing == True :
# if we're computing and got here, we must have hit a breakpoint, therefore pause
# Now send a signal back to the main window
self.simulationThreadSignal.emit("Paused")
# "state" variable indicating thread should stop calculating
self.simulationComputing = False
                #else:
                    # we either never started computing or a breakpoint has already paused us,
                    # so do nothing more than wait
                # in any case, wait until the user gives us a new condition to continue computing
time.sleep(1.0)
logger.debug("waiting for simulationComputing==True",
extra={'sim_index': sim_index})
# end of while data_store.get_simulation_index() < list_len:
logger.info("SIMULATION COMPLETE!", extra={'sim_index': 'N/A'})
self.simulationThreadSignal.emit("Finished!")
self._data_store.exit_event.set()
def run(self):
# Note: This is never called directly. It is called by Qt once the
# thread environment with the thread's start() method has been setup,
# and then runs "continuously"
logger.info("SimulationThread: entering cProfile.runctx() ",
extra={'sim_index': 'N/A'})
# profiling tool, look at results with runsnake:
# https://kupczynski.info/2015/01/16/profiling-python-scripts.html
# this has relatively little overhead for the overall runtime of the program
# I have only been able to get the runsnake files to work on linux
# alternative profile results viewer for windows (untried): https://sourceforge.net/projects/qcachegrindwin/
cProfile.runctx("self.racing_simulation()", globals(), locals(), 'profile-simulation.out')
class PlotRefreshTimingThread(QThread):
# Thread responsible for a periodic signal to the MainWindow which when received causes
# MainWindow to refresh it's plots.
# Define the Signals we'll be emitting to the MainWindow
plotRefreshTimingSignal = pyqtSignal()
    # This thread does no computing; it just emits a periodic plot-refresh signal
def __init__(self, parent=None):
QThread.__init__(self, parent)
self.exiting = False
logger.info("PlotRefreshTimingThread: __init()__",
extra={'sim_index': 'N/A'})
# TODO connect some signals from the main window to us
#self.connect(self, QtCore.SIGNAL('To_End',self.processToEnd)
def __del__(self):
# Before a PlotRefreshTimingThread object is destroyed, we need to ensure that it stops
# processing. For this reason, we implement the following method in a way that
# indicates to the part of the object that performs the processing that it must stop,
# and waits until it does so.
self.exiting = True
self.wait()
def run(self):
# Note: This is never called directly. It is called by Qt once the
# thread environment with the thread's start() method has been setup,
        # and then runs "continuously" to do the work of the thread as its main
# processing loop
logger.info("PlotRefreshTimingThread: entering while() ",
extra={'sim_index': 'N/A'})
while True:
time.sleep(5.0)
self.plotRefreshTimingSignal.emit()
if __name__ == "__main__":
    MainApp = QApplication(sys.argv)
configure_logging()
window = MainWindow()
window.show()
sys.exit(cProfile.runctx("MainApp.exec_()", globals(), locals(), 'profile-display.out'))
``` |
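The MainWindow/SimulationThread handshake above follows the standard PyQt5 QThread-plus-signal pattern. A stripped-down, self-contained sketch of that pattern (names here are illustrative, not taken from the project):

```python
import sys
import time

from PyQt5.QtCore import QCoreApplication, QThread, pyqtSignal

class Worker(QThread):
    progress = pyqtSignal(str)              # worker -> main thread, like simulationThreadSignal

    def run(self):
        for step in range(3):
            time.sleep(0.1)                 # stand-in for a chunk of simulation work
            self.progress.emit("step {} done".format(step))

if __name__ == "__main__":
    app = QCoreApplication(sys.argv)
    worker = Worker()
    worker.progress.connect(print)          # queued connection across threads
    worker.finished.connect(app.quit)       # built-in QThread.finished signal
    worker.start()
    sys.exit(app.exec_())
```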
{
"source": "JohnMarion54/World_Bank_Correlations",
"score": 3
} |
#### File: World_Bank_Correlations/tests/test_World_Bank_Correlations.py
```python
from World_Bank_Correlations import World_Bank_Correlations as wbc
import requests
import pandas as pd
import world_bank_data as wb
def test_wb_corr():
thing1=wb.get_series('1.0.HCount.1.90usd', mrv=50).reset_index()
thing2=wb.get_series('3.0.IncShr.q1',mrv=50).reset_index()
thing3=wb.get_series('3.0.Gini',mrv=50).reset_index()
merged1=pd.merge(thing1,thing2,how='inner',on=['Country','Year'])
merged2=pd.merge(thing1,thing3,how='inner',on=['Country','Year'])
corr1=merged1.loc[:,'1.0.HCount.1.90usd'].corr(merged1.loc[:,'3.0.IncShr.q1'])
corr2=merged2.loc[:,'1.0.HCount.1.90usd'].corr(merged2.loc[:,'3.0.Gini'])
assert wbc.wb_corr(thing1,3,'3.0.IncShr.q1').loc['Income Share of First Quintile','Correlation']==corr1 #test with only one indicator
assert wbc.wb_corr(thing1,3,['3.0.Gini','3.0.IncShr.q1']).loc['Gini Coefficient','Correlation']==corr2 #test with multiple indicators
mumbo=pd.DataFrame()
jumbo=pd.DataFrame()
tumbo=pd.DataFrame()
for country in thing1['Country'].unique():
m=thing1[thing1['Country']==country]
m.loc[:,'lag1']=m.iloc[:,3].shift(-1)
m.loc[:,'pct_chg1']=(((m.iloc[:,3]-m.loc[:,'lag1'])/m.loc[:,'lag1'])*100)
mumbo=pd.concat([mumbo,m])
for country in thing2['Country'].unique():
j=thing2[thing2['Country']==country]
j.loc[:,'lag2']=j.iloc[:,3].shift(-1)
j.loc[:,'pct_chg2']=(((j.iloc[:,3]-j.loc[:,'lag2'])/j.loc[:,'lag2'])*100)
jumbo=pd.concat([jumbo,j])
for country in thing3['Country'].unique():
t=thing3[thing3['Country']==country]
t.loc[:,'lag3']=t.iloc[:,3].shift(-1)
t.loc[:,'pct_chg3']=(((t.iloc[:,3]-t.loc[:,'lag3'])/t.loc[:,'lag3'])*100)
tumbo=pd.concat([tumbo,t])
merged_pct1=pd.merge(mumbo,jumbo,how="inner",on=['Country','Year'])
merged_pct2=pd.merge(mumbo,tumbo,how="inner",on=['Country','Year'])
corr_chg1=merged_pct1['pct_chg1'].corr(merged_pct1['pct_chg2'])
corr_chg2=merged_pct2['pct_chg1'].corr(merged_pct2['pct_chg3'])
assert corr_chg1==wbc.wb_corr(thing1,3,'3.0.IncShr.q1',True).loc['Income Share of First Quintile','Correlation_change']
assert corr_chg2==wbc.wb_corr(thing1,3,['3.0.IncShr.q1','3.0.Gini'],True).loc['Gini Coefficient','Correlation_change']
def test_wb_corr2():
thing1=wbc.wb_corr(wb.get_series('1.0.HCount.1.90usd', mrv=50).reset_index(),3,'3.0.IncShr.q1')
thing2=wbc.wb_corr(wb.get_series('1.0.HCount.1.90usd', mrv=50).reset_index(),3,['3.0.IncShr.q1','3.0.Gini'])
assert len(thing1)==1
assert len(thing2)==2
assert abs(thing2.reset_index().loc[1,'Correlation'])<abs(thing2.reset_index().loc[0,'Correlation'])
def test_wb_topic_corrs():
cors=[]
indicators=[]
sample_data=wb.get_series('1.0.HCount.1.90usd', mrv=50).reset_index()
topic_df=pd.read_xml(requests.get('http://api.worldbank.org/v2/topic/1/indicator?per_page=50').content)
for i in range(0,len(topic_df)):
try:
indicator=topic_df['id'][i]
thing=pd.DataFrame(wb.get_series(indicator,mrv=50))
except:
pass
merged=pd.merge(sample_data,thing,how='inner',on=['Country','Year'])
cors.append(merged.iloc[:,3].corr(merged.iloc[:,(merged.shape[1]-1)]))
indicators.append(topic_df['{http://www.worldbank.org}name'][i])
result=pd.DataFrame(list(zip(indicators,cors)),columns=['Indicator','Correlation']).sort_values(by='Correlation',key=abs,ascending=False).set_index('Indicator').head(5)
assert wbc.wb_topic_corrs(sample_data,3,1,k=5).iloc[0,0]==result.iloc[0,0]
highest_corr_check=wb.get_series('SL.AGR.EMPL.MA.ZS', mrv=50).reset_index()
merged_check=pd.merge(sample_data,highest_corr_check,how='inner',on=['Country','Year'])
result_corr=merged_check.loc[:,'1.0.HCount.1.90usd'].corr(merged_check.loc[:,'SL.AGR.EMPL.MA.ZS'])
assert result.iloc[0,0]==result_corr
mumbo=pd.DataFrame()
jumbo=pd.DataFrame()
for country in sample_data['Country'].unique():
m=sample_data[sample_data['Country']==country]
m.loc[:,'lag']=m.loc[:,'1.0.HCount.1.90usd'].shift(-1)
m.loc[:,'pct_chg1']=(((m.loc[:,'1.0.HCount.1.90usd']-m.loc[:,'lag'])/m.loc[:,'lag'])*100)
mumbo=pd.concat([mumbo,m])
high_chg_check=wb.get_series('ER.H2O.FWAG.ZS',mrv=50).reset_index()
for country in high_chg_check['Country'].unique():
j=high_chg_check[high_chg_check['Country']==country]
j.loc[:,'lag2']=j.loc[:,'ER.H2O.FWAG.ZS'].shift(-1)
j.loc[:,'pct_chg2']=(((j.loc[:,'ER.H2O.FWAG.ZS']-j.loc[:,'lag2'])/j.loc[:,'lag2'])*100)
jumbo=pd.concat([jumbo,j])
next_check=pd.merge(mumbo,jumbo,how='inner',on=['Country','Year'])
chg_check_result=next_check.loc[:,'pct_chg1'].corr(next_check.loc[:,'pct_chg2'])
assert wbc.wb_topic_corrs(sample_data,3,1,3,True).iloc[1,2]==chg_check_result
def test_wb_corrs_topic2():
assertion_matrix = wbc.wb_topic_corrs(wb.get_series('3.0.Gini',mrv=50).reset_index(),3,'Energy & Mining')==wbc.wb_topic_corrs(wb.get_series('3.0.Gini',mrv=50).reset_index(),3,5)
assert assertion_matrix['Correlation'].sum()==len(assertion_matrix)
assert assertion_matrix['n'].sum()==len(assertion_matrix)
assert wbc.wb_topic_corrs(wb.get_series('3.0.Gini',mrv=50).reset_index(),3,'Energy & Mining',change=True,t_lim=.4).shape[1]==6
assert wbc.wb_topic_corrs(wb.get_series('3.0.Gini',mrv=50).reset_index(),3,'Energy & Mining',t_lim=.4).shape[1]==3
trial = wbc.wb_topic_corrs(wb.get_series('SP.POP.TOTL',mrv=50).reset_index(),3,1)
assert abs(trial.reset_index().loc[0,'Correlation'])>abs(trial.reset_index().loc[1,'Correlation'])
assert abs(trial.reset_index().loc[1,'Correlation'])>abs(trial.reset_index().loc[2,'Correlation'])
def test_wb_corrs_search():
sample_data=wb.get_series('3.0.Gini',mrv=50).reset_index()
inc_share_top=wb.get_series('3.0.IncShr.q5',mrv=50).reset_index()
merged_test=pd.merge(sample_data,inc_share_top,how='inner',on=['Country','Year'])
corr_result=merged_test.loc[:,'3.0.Gini'].corr(merged_test.loc[:,'3.0.IncShr.q5'])
assert wbc.wb_corrs_search(sample_data,3,'income share',3).loc['Income Share of Fifth Quintile',"Correlation"]==corr_result
quint2=wb.get_series('3.0.IncShr.q2',mrv=50).reset_index()
mumbo=pd.DataFrame()
jumbo=pd.DataFrame()
for country in sample_data['Country'].unique():
m=sample_data[sample_data['Country']==country]
m.loc[:,'lag_dat']=m.loc[:,'3.0.Gini'].shift(-1)
m.loc[:,'pct_chg_dat']=(((m.loc[:,'3.0.Gini']-m['lag_dat'])/m['lag_dat'])*100)
mumbo=pd.concat([mumbo,m])
for country in quint2['Country'].unique():
j=quint2[quint2['Country']==country]
j.loc[:,'lag_ind']=j.loc[:,'3.0.IncShr.q2'].shift(-1)
j.loc[:,'pct_chg_ind']=(((j.loc[:,'3.0.IncShr.q2']-j['lag_ind'])/j['lag_ind'])*100)
jumbo=pd.concat([jumbo,j])
merged_pct_test=pd.merge(mumbo,jumbo,how='inner',on=['Country','Year'])
change_cor_result=merged_pct_test.loc[:,'pct_chg_dat'].corr(merged_pct_test.loc[:,'pct_chg_ind'])
assert wbc.wb_corrs_search(sample_data,3,'income share',3,True).loc['Income Share of Second Quintile','Correlation_change']==change_cor_result
``` |
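The tests above all exercise the same core pattern: align two World Bank indicators on Country/Year, then correlate the value columns. A minimal sketch of that pattern (requires network access to the World Bank API):

```python
import pandas as pd
import world_bank_data as wb

# Fetch two indicators and align them on Country/Year, as the tests do
poverty = wb.get_series('1.0.HCount.1.90usd', mrv=50).reset_index()
gini = wb.get_series('3.0.Gini', mrv=50).reset_index()

merged = pd.merge(poverty, gini, how='inner', on=['Country', 'Year'])
print(merged['1.0.HCount.1.90usd'].corr(merged['3.0.Gini']))   # Pearson correlation
```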
{
"source": "johnmarkdaniels/ugroup_challenge",
"score": 3
} |
#### File: ugroup_challenge/api/cars_api.py
```python
from flask import Flask, request, jsonify
# from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import os
import sqlite3
from flask import g
DATABASE = '../database/cars.db'

app = Flask(__name__)  # needed before the @app.route decorators below
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
# def index():
# print('index function')
# cur = get_db().cursor()
def test_func():
print('Hello, I am confirming this works')
@app.route('/')
def index():
cur = get_db().cursor()
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
################################################################
### SQLAlchemy VERSION
# app = Flask(__name__)
# basedir = os.path.abspath(os.path.dirname(__file__))
# app.config['SQLITE_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'cars.db')
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# db = SQLAlchemy(app)
# ma = Marshmallow(app)
# class store(db.Model):
# store = db.Column(db.String(80), primary_key=True)
# store_id = db.Column(db.String(120), unique=True)
# def __init__(self, store, store_id):
# self.store = store
# self.store_id = store_id
# class StoreSchema(ma.Schema):
# class Meta:
# # Fields to expose
# fields = ('Store', 'Store_ID')
# store_schema = StoreSchema()
# store_schemas = StoreSchema(many=True)
################################################################
# endpoint to create new store
# endpoint to read store(s)
# SQLite VERSION
@app.route('/stores/')
def test_func():
print('Hello, I am confirming this works')
test_func()
# for store in query_db('select * from Stores_Table limit 5'):
# print store['Store'], 'has the Store_ID', store['Store_ID']
###############################
# SQLAlchemy VERSION
# # @app.route("/store", methods=["GET"])
# def get_store():
# all_stores = store.query.all()
# result = store_schema.dump(all_stores)
# return jsonify(result.data)
################################
# endpoint to update store(s)
# endpoint to delete store(s)
# endpoint to create new VIN
# endpoint to read VIN(s)
# endpoint to update VIN(s)
# endpoint to delete VIN(s)
# endpoint to create new transaction
# endpoint to read transaction(s)
# endpoint to update transaction(s)
# endpoint to delete transaction(s)
if __name__ == '__main__':
app.run(debug=True)
``` |
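The endpoints above are stubs. One hedged sketch of how the "read store(s)" endpoint could be completed around the same sqlite3 helpers, assuming the `Stores_Table` schema hinted at in the commented-out code (table and column names are assumptions):

```python
import sqlite3
from flask import Flask, g, jsonify

DATABASE = '../database/cars.db'   # same relative path the module uses
app = Flask(__name__)

def get_db():
    db = getattr(g, '_database', None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
    return db

@app.route('/stores/', methods=['GET'])
def get_stores():
    cur = get_db().execute('SELECT Store, Store_ID FROM Stores_Table')   # table/columns assumed
    rows = cur.fetchall()
    cur.close()
    return jsonify(stores=[{'Store': s, 'Store_ID': sid} for s, sid in rows])

@app.teardown_appcontext
def close_connection(exception):
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()

if __name__ == '__main__':
    app.run(debug=True)
```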
{
"source": "johnmarkkarr/plugin.video.internet.archive",
"score": 3
} |
#### File: johnmarkkarr/plugin.video.internet.archive/common.py
```python
from __future__ import unicode_literals
import xbmcaddon
import urllib
import urlparse
import requests
import json
import xbmc
import os
class Media(object):
@classmethod
def factory(cls, type):
if type == 0:
return Video()
if type == 1:
return Audio()
if type == 2:
return Image()
def filter(self, items, sort):
results = []
for format in self.FORMATS:
if format in items:
if sort:
items[format].sort(key=lambda item: item[1].getLabel())
results.extend(items[format])
return results
class Video(Media):
FORMATS = ['mpeg', 'mp4', 'ogv', 'ogg', 'mkv', 'm4a']
TYPE = 'movies'
INFO = 'video'
class Audio(Media):
FORMATS = ['ogg', 'mp3', 'oga', 'wav', 'mid', 'midi', 'flac', 'aiff', 'aac', 'shn']
TYPE = 'songs'
INFO = 'music'
class Image(Media):
FORMATS = ['png', 'jpg', 'jpeg', 'jp2', 'tiff', 'gif', 'bmp']
TYPE = 'images'
INFO = 'pictures'
class Addon(object):
TYPES = {'video': 0, 'audio': 1, 'image': 2}
MEDIA = ['movies', 'audio', 'image']
ACTIONS = ['Search', 'Browse', 'Favorites', 'Picks']
def __init__(self, home, handle, args):
addon = urlparse.urlparse(home).netloc
self.addon = xbmcaddon.Addon(id=addon)
self.home = home
self.handle = int(handle)
self.args = {}
self.picksURL = 'https://raw.githubusercontent.com/johnmarkkarr/plugin.video.internet.archive/master/picks.json'
args = urlparse.parse_qs(args[1:])
for key in args:
self.args[key] = args[key][0]
self.url = 'http://archive.org'
def buildURL(url, query):
return url + '?' + urllib.urlencode(query)
def makeRequest(url):
try:
raw = requests.get(url)
except requests.ConnectionError:
return (False, 'Connection error.')
except requests.Timeout:
return (False, 'Connection timed out.')
except:
return (False, 'Failed to connect to server.')
try:
raw.raise_for_status()
except requests.HTTPError as e:
return (False, e.message)
return (True, raw)
def openFavorites(addon):
dir = xbmc.translatePath(addon.addon.getAddonInfo('profile'))
try:
os.mkdir(dir)
except OSError:
pass
path = os.path.join(dir, 'favorites.json')
try:
file = open(path, 'r')
except IOError:
file = open(path, 'w')
try:
favorites = json.load(file)
except:
favorites = [[], [], []]
file.close()
return favorites, path
``` |
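The Media factory above selects a media subclass by type index and then filters a format-to-items mapping. A rough usage sketch (only meaningful inside Kodi, where the xbmc modules are importable; the shape of `items` is an assumption about the caller, not code from the add-on):

```python
from common import Media, Addon

media = Media.factory(Addon.TYPES['audio'])        # -> Audio()
items = {
    'mp3': [('http://archive.org/download/x/x.mp3', 'listitem placeholder')],
    'pdf': [('http://archive.org/download/x/x.pdf', 'listitem placeholder')],  # not in Audio.FORMATS
}
playable = media.filter(items, sort=False)         # keeps only known audio formats, in FORMAT order
```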
{
"source": "johnmartingodo/pyKinematicsKineticsToolbox",
"score": 3
} |
#### File: pyKinematicsKineticsToolbox/pykinematicskineticstoolbox/pykinematicskineticsfunctions.py
```python
import numpy as np
# Kinematics functions
def Rln():
Rlb = np.zeros((3,3))
Rlb[0, :] = [-1, 0, 0]
Rlb[1, :] = [0, 1, 0]
Rlb[2, :] = [0, 0, -1]
return Rlb
def Tln():
Tlb = np.zeros((3,3))
Tlb[0, :] = [-1, 0, 0]
Tlb[1, :] = [0, 1, 0]
Tlb[2, :] = [0, 0, -1]
return Tlb
def Rzyx(phi, theta, psi):
Rbn = np.zeros((3,3))
Rbn[0,0] = np.cos(psi)*np.cos(theta)
Rbn[0,1] = -np.sin(psi)*np.cos(phi) + np.cos(psi)*np.sin(theta)*np.sin(phi)
Rbn[0,2] = np.sin(psi)*np.sin(phi) + np.cos(psi)*np.cos(phi)*np.sin(theta)
Rbn[1,0] = np.sin(psi)*np.cos(theta)
Rbn[1,1] = np.cos(psi)*np.cos(phi) + np.sin(phi)*np.sin(theta)*np.sin(psi)
Rbn[1,2] = -np.cos(psi)*np.sin(phi) + np.sin(theta)*np.sin(psi)*np.cos(phi)
Rbn[2,0] = -np.sin(theta)
Rbn[2,1] = np.cos(theta)*np.sin(phi)
Rbn[2,2] = np.cos(theta)*np.cos(phi)
return Rbn
def Smtrx(vector):
lambda_1 = vector[0]
lambda_2 = vector[1]
lambda_3 = vector[2]
S = np.zeros((3,3))
S[0,0] = 0
S[0,1] = -lambda_3
S[0,2] = lambda_2
S[1,0] = lambda_3
S[1,1] = 0
S[1,2] = -lambda_1
S[2,0] = -lambda_2
S[2,1] = lambda_1
S[2,2] = 0
return S
def R_axis_theta(k_in, theta):
k = k_in/np.sqrt(np.sum(k_in**2))
R = np.identity(3) + np.sin(theta)*Smtrx(k) + (1-np.cos(theta))*np.dot(Smtrx(k), Smtrx(k))
return R
# Kinetics functions
def calculate_Ig(m, r_xx_CG):
''' Inertia matrix about CG. m denotes vessel mass, r_xx_CG is a (6x6)
array denoting all radii of inertia about CG.'''
Ig = np.zeros((3, 3))
for i in range(3):
for j in range(3):
Ig[i, j] = m*r_xx_CG[i, j]**2
return Ig
def calculate_Ib(m, r_xx_CG, rg_b):
''' Inertia matrix about the origin of the vessel coordinate system.
* m: vessel mass
* r_xx_CG: (6x6) matrix of radii of inertia about CG
* rg_b: position of CG expressed in BODY coordinates. Ref. Fossen 2011, page 50 (1)'''
Ig = calculate_Ig(m, r_xx_CG)
    Ib = Ig - m*np.dot(Smtrx(rg_b), Smtrx(rg_b))  # S(rg_b)^2 in Fossen's formula is a matrix product, not an elementwise square
return Ib
def calculate_MRB(m, r_xx_CG, rg_b):
''' Calculate M matrix for a vessel with center of gravity at rg_b from CO, defined in BODY coordinates.
rg_b denotes the position of COG expressed in BODY coordinates ("vector to b expressed in BODY coordinates". See Fossen 2011 page 51)
'''
Ib = calculate_Ib(m, r_xx_CG, rg_b)
MRB = np.zeros((6, 6))
MRB[0:3, 0:3] = m*np.identity(3)
MRB[3:7, 0:3] = m*Smtrx(rg_b)
MRB[0:3, 3:7] = -m*Smtrx(rg_b)
MRB[3:7, 3:7] = Ib
return MRB, Ib
# def M_matrix_m_radiiOfGyration(m, r44, r55, r66, r45 = 0, r46 = 0, r56 = 0, rg_b = np.zeros(3)):
# ''' Calculate M matrix for a vessel with center of gravity at rg_b from CO, defined in body coordinates.
# NB: Not tested for rg_b != 0 and off-diagonal radii of gyration != 0
# Update: Found a bug when rg_b is not zero. Then there is no automatic update of off-diagonal terms in Ib. M_matrix_m_COG should then
# be used instead'''
# M_matrix = np.zeros((6, 6))
# Ib = np.zeros((3,3))
#
# Ib[0, 0] = m*r44**2
# Ib[1, 1] = m*r55**2
# Ib[2, 2] = m*r66**2
# Ib[0, 1] = -m*r45**2
# Ib[0, 2] = -m*r46**2
# Ib[1, 2] = -m*r56**2
# Ib[1, 0] = Ib[0, 1]
# Ib[2, 0] = Ib[0, 2]
# Ib[2, 1] = Ib[1, 2]
#
#
# M_matrix[0:3, 0:3] = m*np.identity(3)
# M_matrix[3:7, 0:3] = m*Smtrx(rg_b)
# M_matrix[0:3, 3:7] = -m*Smtrx(rg_b)
# M_matrix[3:7, 3:7] = Ib
#
# return M_matrix, Ib
def calculate_CRB(m, Ib, nu2, rg_b):
''' Calculate coriolis matrix for a vessel with center of gravity at
rg_b from CO, defined in BODY coordinates.
* m: Mass
* Ib: Inertia matrix about the BODY coordinate system origin
* nu2: vector of BODY frame angular velocities in roll, pitch and yaw respectively
* rg_b: vector to CG in BODY coordinate system '''
CRB = np.zeros((6, 6))
CRB[0:3, 0:3] = m*Smtrx(nu2)
CRB[0:3, 3:7] = -m*np.dot(Smtrx(nu2), Smtrx(rg_b))
CRB[3:7, 0:3] = m*np.dot(Smtrx(rg_b), Smtrx(nu2))
#CRB[0:3, 3:7] = -m*Smtrx(nu2)*Smtrx(rg_b)
#CRB[3:7, 0:3] = m*Smtrx(rg_b)*Smtrx(nu2)
CRB[3:7, 3:7] = -Smtrx(np.dot(Ib, nu2))
return CRB
def calculate_TTheta(euler_angles):
'''Calculate the transformation matrix from body frame angular velocities to
time derivatives of Euler angles'''
phi = euler_angles[0]
theta = euler_angles[1]
psi = euler_angles[2]
T = np.zeros((3, 3))
T[0, 0] = 1
T[0, 1] = np.sin(phi)*np.tan(theta)
T[0, 2] = np.cos(phi)*np.tan(theta)
T[1, 1] = np.cos(phi)
T[1, 2] = -np.sin(phi)
T[2, 1] = np.sin(phi)/np.cos(theta)
T[2, 2] = np.cos(phi)/np.cos(theta)
return T
# References
# (1) 2011, Thor I. Fossen. Handbook of Marine Craft Hydrodynamics and Motion Control
``` |
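A small numerical usage sketch of the functions above (values are arbitrary; the import path assumes the package/module layout shown in the file header):

```python
import numpy as np
from pykinematicskineticstoolbox import pykinematicskineticsfunctions as pkk

m = 1000.0                                    # vessel mass [kg]
r_xx_CG = np.zeros((6, 6))
r_xx_CG[0, 0], r_xx_CG[1, 1], r_xx_CG[2, 2] = 2.0, 3.0, 3.5    # radii of inertia about CG [m]
rg_b = np.array([0.5, 0.0, -0.2])             # CG offset from CO in BODY coordinates [m]
nu2 = np.array([0.01, 0.02, 0.05])            # roll/pitch/yaw rates [rad/s]

MRB, Ib = pkk.calculate_MRB(m, r_xx_CG, rg_b)
CRB = pkk.calculate_CRB(m, Ib, nu2, rg_b)
print(MRB.shape, CRB.shape)                   # (6, 6) (6, 6)

# Rzyx should be a proper rotation: R R^T = I and det(R) = 1
R = pkk.Rzyx(0.1, -0.2, 0.3)
print(np.allclose(np.dot(R, R.T), np.eye(3)), np.isclose(np.linalg.det(R), 1.0))
```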